author    Ben Noordhuis <info@bnoordhuis.nl>       2014-03-31 14:38:28 +0200
committer Fedor Indutny <fedor@indutny.com>        2014-04-02 00:05:24 +0400
commit    67e078094b53861a5aa7e9354e33487d0bd4f73b (patch)
tree      09a706adee1ddb59c1507ee3320de9cb6896135b /deps/v8
parent    f984555d47298cfb01b3e55c2861066379306fc3 (diff)
download  node-67e078094b53861a5aa7e9354e33487d0bd4f73b.tar.gz
deps: upgrade v8 to 3.25.30
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/.gitignore3
-rw-r--r--deps/v8/AUTHORS4
-rw-r--r--deps/v8/ChangeLog399
-rw-r--r--deps/v8/DEPS2
-rw-r--r--deps/v8/LICENSE2
-rw-r--r--deps/v8/Makefile44
-rw-r--r--deps/v8/Makefile.android60
-rw-r--r--deps/v8/PRESUBMIT.py9
-rw-r--r--deps/v8/build/all.gyp1
-rw-r--r--deps/v8/build/android.gypi34
-rw-r--r--deps/v8/build/features.gypi2
-rw-r--r--deps/v8/build/standalone.gypi30
-rw-r--r--deps/v8/build/toolchain.gypi19
-rw-r--r--[-rwxr-xr-x]deps/v8/include/v8-debug.h0
-rw-r--r--deps/v8/include/v8-profiler.h68
-rw-r--r--deps/v8/include/v8-util.h355
-rw-r--r--deps/v8/include/v8.h573
-rw-r--r--deps/v8/samples/lineprocessor.cc3
-rw-r--r--deps/v8/samples/shell.cc3
-rw-r--r--deps/v8/src/accessors.cc41
-rw-r--r--deps/v8/src/accessors.h2
-rw-r--r--deps/v8/src/allocation-tracker.cc173
-rw-r--r--deps/v8/src/allocation-tracker.h56
-rw-r--r--deps/v8/src/api.cc806
-rw-r--r--deps/v8/src/api.h3
-rw-r--r--deps/v8/src/arm/OWNERS1
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h82
-rw-r--r--deps/v8/src/arm/assembler-arm.cc438
-rw-r--r--deps/v8/src/arm/assembler-arm.h120
-rw-r--r--deps/v8/src/arm/builtins-arm.cc178
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc516
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h2
-rw-r--r--deps/v8/src/arm/constants-arm.h2
-rw-r--r--deps/v8/src/arm/debug-arm.cc12
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc33
-rw-r--r--deps/v8/src/arm/disasm-arm.cc12
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc494
-rw-r--r--deps/v8/src/arm/ic-arm.cc26
-rw-r--r--deps/v8/src/arm/lithium-arm.cc278
-rw-r--r--deps/v8/src/arm/lithium-arm.h234
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc740
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h23
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc167
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h87
-rw-r--r--deps/v8/src/arm/simulator-arm.cc10
-rw-r--r--deps/v8/src/arm/simulator-arm.h4
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc190
-rw-r--r--deps/v8/src/arm64/OWNERS1
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h1229
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc2813
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h2233
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc1562
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc5743
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h500
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc615
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h71
-rw-r--r--deps/v8/src/arm64/constants-arm64.h1271
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc199
-rw-r--r--deps/v8/src/arm64/cpu-arm64.h107
-rw-r--r--deps/v8/src/arm64/debug-arm64.cc393
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h671
-rw-r--r--deps/v8/src/arm64/decoder-arm64.cc109
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h210
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc388
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc1856
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h115
-rw-r--r--deps/v8/src/arm64/frames-arm64.cc65
-rw-r--r--deps/v8/src/arm64/frames-arm64.h133
-rw-r--r--deps/v8/src/arm64/full-codegen-arm64.cc5015
-rw-r--r--deps/v8/src/arm64/ic-arm64.cc1407
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc333
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h501
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc618
-rw-r--r--deps/v8/src/arm64/instrument-arm64.h107
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc2576
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h3100
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc5901
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.h490
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.cc334
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.h90
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h1677
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc5184
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h2310
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.cc1728
-rw-r--r--deps/v8/src/arm64/regexp-macro-assembler-arm64.h315
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc3645
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h908
-rw-r--r--deps/v8/src/arm64/stub-cache-arm64.cc1496
-rw-r--r--deps/v8/src/arm64/utils-arm64.cc112
-rw-r--r--deps/v8/src/arm64/utils-arm64.h135
-rw-r--r--deps/v8/src/array-iterator.js10
-rw-r--r--deps/v8/src/array.js14
-rw-r--r--deps/v8/src/assembler.cc95
-rw-r--r--deps/v8/src/assembler.h87
-rw-r--r--deps/v8/src/assert-scope.cc21
-rw-r--r--deps/v8/src/assert-scope.h129
-rw-r--r--deps/v8/src/ast.cc49
-rw-r--r--deps/v8/src/ast.h90
-rw-r--r--deps/v8/src/atomicops.h33
-rw-r--r--deps/v8/src/atomicops_internals_arm64_gcc.h372
-rw-r--r--deps/v8/src/atomicops_internals_arm_gcc.h237
-rw-r--r--deps/v8/src/atomicops_internals_atomicword_compat.h122
-rw-r--r--deps/v8/src/atomicops_internals_mac.h (renamed from deps/v8/src/atomicops_internals_x86_macosx.h)104
-rw-r--r--deps/v8/src/atomicops_internals_tsan.h194
-rw-r--r--deps/v8/src/atomicops_internals_x86_msvc.h14
-rw-r--r--deps/v8/src/bootstrapper.cc190
-rw-r--r--deps/v8/src/bootstrapper.h1
-rw-r--r--deps/v8/src/builtins.cc503
-rw-r--r--deps/v8/src/builtins.h6
-rw-r--r--deps/v8/src/char-predicates.h21
-rw-r--r--deps/v8/src/checks.cc37
-rw-r--r--deps/v8/src/checks.h26
-rw-r--r--deps/v8/src/circular-queue.h1
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc251
-rw-r--r--deps/v8/src/code-stubs.cc23
-rw-r--r--deps/v8/src/code-stubs.h205
-rw-r--r--deps/v8/src/codegen.cc10
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/collection.js173
-rw-r--r--deps/v8/src/compilation-cache.cc10
-rw-r--r--deps/v8/src/compilation-cache.h9
-rw-r--r--deps/v8/src/compiler.cc129
-rw-r--r--deps/v8/src/compiler.h94
-rw-r--r--deps/v8/src/contexts.cc14
-rw-r--r--deps/v8/src/contexts.h81
-rw-r--r--deps/v8/src/conversions-inl.h2
-rw-r--r--deps/v8/src/counters.cc8
-rw-r--r--deps/v8/src/d8-debug.cc2
-rw-r--r--deps/v8/src/d8-debug.h1
-rw-r--r--deps/v8/src/d8.cc82
-rw-r--r--deps/v8/src/d8.h4
-rw-r--r--deps/v8/src/date.cc1
-rw-r--r--deps/v8/src/date.h15
-rw-r--r--deps/v8/src/date.js23
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug.cc47
-rw-r--r--deps/v8/src/deoptimizer.cc170
-rw-r--r--deps/v8/src/deoptimizer.h4
-rw-r--r--deps/v8/src/disassembler.cc2
-rw-r--r--deps/v8/src/elements-kind.cc21
-rw-r--r--deps/v8/src/elements-kind.h17
-rw-r--r--deps/v8/src/elements.cc695
-rw-r--r--deps/v8/src/elements.h55
-rw-r--r--deps/v8/src/execution.cc56
-rw-r--r--deps/v8/src/execution.h9
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/factory.cc150
-rw-r--r--deps/v8/src/factory.h66
-rw-r--r--deps/v8/src/feedback-slots.h110
-rw-r--r--deps/v8/src/flag-definitions.h79
-rw-r--r--deps/v8/src/frames-inl.h7
-rw-r--r--deps/v8/src/frames.cc8
-rw-r--r--deps/v8/src/frames.h21
-rw-r--r--deps/v8/src/full-codegen.cc65
-rw-r--r--deps/v8/src/full-codegen.h43
-rw-r--r--deps/v8/src/func-name-inferrer.cc11
-rw-r--r--deps/v8/src/func-name-inferrer.h4
-rw-r--r--deps/v8/src/global-handles.cc10
-rw-r--r--deps/v8/src/global-handles.h3
-rw-r--r--deps/v8/src/globals.h56
-rw-r--r--deps/v8/src/handles-inl.h3
-rw-r--r--deps/v8/src/handles.cc39
-rw-r--r--deps/v8/src/harmony-array.js4
-rw-r--r--deps/v8/src/harmony-math.js104
-rw-r--r--deps/v8/src/heap-inl.h84
-rw-r--r--deps/v8/src/heap-profiler.cc5
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc294
-rw-r--r--deps/v8/src/heap-snapshot-generator.h62
-rw-r--r--deps/v8/src/heap.cc576
-rw-r--r--deps/v8/src/heap.h203
-rw-r--r--deps/v8/src/hydrogen-bce.cc47
-rw-r--r--deps/v8/src/hydrogen-check-elimination.cc296
-rw-r--r--deps/v8/src/hydrogen-flow-engine.h19
-rw-r--r--deps/v8/src/hydrogen-gvn.cc510
-rw-r--r--deps/v8/src/hydrogen-gvn.h94
-rw-r--r--deps/v8/src/hydrogen-instructions.cc577
-rw-r--r--deps/v8/src/hydrogen-instructions.h636
-rw-r--r--deps/v8/src/hydrogen-load-elimination.cc92
-rw-r--r--deps/v8/src/hydrogen-minus-zero.cc91
-rw-r--r--deps/v8/src/hydrogen-range-analysis.cc99
-rw-r--r--deps/v8/src/hydrogen-range-analysis.h15
-rw-r--r--deps/v8/src/hydrogen-representation-changes.cc10
-rw-r--r--deps/v8/src/hydrogen-store-elimination.cc139
-rw-r--r--deps/v8/src/hydrogen-store-elimination.h (renamed from deps/v8/src/hydrogen-minus-zero.h)27
-rw-r--r--deps/v8/src/hydrogen.cc1267
-rw-r--r--deps/v8/src/hydrogen.h191
-rw-r--r--deps/v8/src/i18n.cc80
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h27
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc30
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h35
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc133
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc500
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h3
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc16
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc35
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc63
-rw-r--r--deps/v8/src/ia32/frames-ia32.h2
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc426
-rw-r--r--deps/v8/src/ia32/ic-ia32.cc26
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc825
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h12
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc4
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc403
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h248
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc165
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h30
-rw-r--r--deps/v8/src/ia32/regexp-macro-assembler-ia32.cc2
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc168
-rw-r--r--deps/v8/src/ic-inl.h61
-rw-r--r--deps/v8/src/ic.cc402
-rw-r--r--deps/v8/src/ic.h115
-rw-r--r--deps/v8/src/icu_util.cc58
-rw-r--r--deps/v8/src/icu_util.h2
-rw-r--r--deps/v8/src/incremental-marking.cc22
-rw-r--r--deps/v8/src/incremental-marking.h6
-rw-r--r--deps/v8/src/interpreter-irregexp.cc22
-rw-r--r--deps/v8/src/isolate.cc189
-rw-r--r--deps/v8/src/isolate.h216
-rw-r--r--deps/v8/src/json-parser.h8
-rw-r--r--deps/v8/src/json-stringifier.h56
-rw-r--r--deps/v8/src/json.js22
-rw-r--r--deps/v8/src/jsregexp.cc20
-rw-r--r--deps/v8/src/libplatform/default-platform.h4
-rw-r--r--deps/v8/src/lithium-allocator-inl.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc2
-rw-r--r--deps/v8/src/lithium-allocator.h4
-rw-r--r--deps/v8/src/lithium-codegen.cc19
-rw-r--r--deps/v8/src/lithium-codegen.h2
-rw-r--r--deps/v8/src/lithium.cc48
-rw-r--r--deps/v8/src/lithium.h157
-rw-r--r--deps/v8/src/liveedit.cc154
-rw-r--r--deps/v8/src/log.cc30
-rw-r--r--deps/v8/src/log.h9
-rw-r--r--deps/v8/src/macro-assembler.h9
-rw-r--r--deps/v8/src/macros.py1
-rw-r--r--deps/v8/src/mark-compact-inl.h5
-rw-r--r--deps/v8/src/mark-compact.cc261
-rw-r--r--deps/v8/src/mark-compact.h23
-rw-r--r--deps/v8/src/messages.cc6
-rw-r--r--deps/v8/src/messages.h1
-rw-r--r--deps/v8/src/messages.js49
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h28
-rw-r--r--deps/v8/src/mips/assembler-mips.cc24
-rw-r--r--deps/v8/src/mips/assembler-mips.h30
-rw-r--r--deps/v8/src/mips/builtins-mips.cc129
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc474
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h2
-rw-r--r--deps/v8/src/mips/debug-mips.cc10
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc33
-rw-r--r--deps/v8/src/mips/frames-mips.h2
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc393
-rw-r--r--deps/v8/src/mips/ic-mips.cc34
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc688
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h12
-rw-r--r--deps/v8/src/mips/lithium-mips.cc252
-rw-r--r--deps/v8/src/mips/lithium-mips.h243
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc109
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h15
-rw-r--r--deps/v8/src/mips/simulator-mips.cc10
-rw-r--r--deps/v8/src/mips/simulator-mips.h4
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc185
-rw-r--r--deps/v8/src/mirror-debugger.js111
-rw-r--r--deps/v8/src/object-observe.js153
-rw-r--r--deps/v8/src/objects-debug.cc25
-rw-r--r--deps/v8/src/objects-inl.h689
-rw-r--r--deps/v8/src/objects-printer.cc68
-rw-r--r--deps/v8/src/objects-visiting-inl.h33
-rw-r--r--deps/v8/src/objects-visiting.h2
-rw-r--r--deps/v8/src/objects.cc1885
-rw-r--r--deps/v8/src/objects.h669
-rw-r--r--deps/v8/src/optimizing-compiler-thread.cc10
-rw-r--r--deps/v8/src/parser.cc2291
-rw-r--r--deps/v8/src/parser.h425
-rw-r--r--deps/v8/src/platform-cygwin.cc4
-rw-r--r--deps/v8/src/platform-freebsd.cc4
-rw-r--r--deps/v8/src/platform-linux.cc7
-rw-r--r--deps/v8/src/platform-macos.cc4
-rw-r--r--deps/v8/src/platform-openbsd.cc4
-rw-r--r--deps/v8/src/platform-posix.cc43
-rw-r--r--deps/v8/src/platform-qnx.cc4
-rw-r--r--deps/v8/src/platform-solaris.cc4
-rw-r--r--deps/v8/src/platform-win32.cc272
-rw-r--r--deps/v8/src/platform.h13
-rw-r--r--deps/v8/src/preparse-data-format.h2
-rw-r--r--deps/v8/src/preparse-data.cc104
-rw-r--r--deps/v8/src/preparse-data.h235
-rw-r--r--deps/v8/src/preparser.cc941
-rw-r--r--deps/v8/src/preparser.h2088
-rw-r--r--deps/v8/src/profile-generator-inl.h2
-rw-r--r--deps/v8/src/promise.js154
-rw-r--r--deps/v8/src/property-details-inl.h51
-rw-r--r--deps/v8/src/property-details.h18
-rw-r--r--deps/v8/src/property.h87
-rw-r--r--deps/v8/src/regexp-macro-assembler-tracer.cc5
-rw-r--r--deps/v8/src/regexp-macro-assembler.h1
-rw-r--r--deps/v8/src/runtime.cc1363
-rw-r--r--deps/v8/src/runtime.h260
-rw-r--r--deps/v8/src/runtime.js14
-rw-r--r--deps/v8/src/sampler.cc58
-rw-r--r--deps/v8/src/scanner.cc144
-rw-r--r--deps/v8/src/scanner.h211
-rw-r--r--deps/v8/src/scopeinfo.cc8
-rw-r--r--deps/v8/src/scopes.cc65
-rw-r--r--deps/v8/src/scopes.h40
-rw-r--r--deps/v8/src/serialize.cc64
-rw-r--r--deps/v8/src/serialize.h1
-rw-r--r--deps/v8/src/simulator.h2
-rw-r--r--deps/v8/src/spaces.cc78
-rw-r--r--deps/v8/src/spaces.h81
-rw-r--r--deps/v8/src/store-buffer.cc31
-rw-r--r--deps/v8/src/stub-cache.cc99
-rw-r--r--deps/v8/src/stub-cache.h81
-rw-r--r--deps/v8/src/sweeper-thread.cc1
-rw-r--r--deps/v8/src/symbol.js85
-rw-r--r--deps/v8/src/token.h2
-rw-r--r--deps/v8/src/transitions-inl.h1
-rw-r--r--deps/v8/src/type-info.cc124
-rw-r--r--deps/v8/src/type-info.h22
-rw-r--r--deps/v8/src/typedarray.js24
-rw-r--r--deps/v8/src/types.cc115
-rw-r--r--deps/v8/src/types.h205
-rw-r--r--deps/v8/src/typing.cc22
-rw-r--r--deps/v8/src/unicode.cc40
-rw-r--r--deps/v8/src/unicode.h3
-rw-r--r--deps/v8/src/unique.h4
-rw-r--r--deps/v8/src/uri.h9
-rw-r--r--deps/v8/src/utils.cc14
-rw-r--r--deps/v8/src/utils.h103
-rw-r--r--deps/v8/src/v8.cc33
-rw-r--r--deps/v8/src/v8.h2
-rw-r--r--deps/v8/src/v8globals.h7
-rw-r--r--deps/v8/src/v8natives.js40
-rw-r--r--deps/v8/src/variables.cc4
-rw-r--r--deps/v8/src/variables.h2
-rw-r--r--deps/v8/src/version.cc6
-rw-r--r--deps/v8/src/vm-state-inl.h3
-rw-r--r--deps/v8/src/weak_collection.js206
-rw-r--r--deps/v8/src/win32-headers.h1
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h23
-rw-r--r--deps/v8/src/x64/assembler-x64.cc329
-rw-r--r--deps/v8/src/x64/assembler-x64.h761
-rw-r--r--deps/v8/src/x64/builtins-x64.cc340
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc867
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h22
-rw-r--r--deps/v8/src/x64/codegen-x64.cc36
-rw-r--r--deps/v8/src/x64/debug-x64.cc32
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc86
-rw-r--r--deps/v8/src/x64/disasm-x64.cc60
-rw-r--r--deps/v8/src/x64/frames-x64.h2
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc751
-rw-r--r--deps/v8/src/x64/ic-x64.cc112
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc1097
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h7
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.cc12
-rw-r--r--deps/v8/src/x64/lithium-x64.cc435
-rw-r--r--deps/v8/src/x64/lithium-x64.h259
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc790
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h43
-rw-r--r--deps/v8/src/x64/regexp-macro-assembler-x64.cc222
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc248
-rw-r--r--deps/v8/src/zone-allocator.h14
-rw-r--r--deps/v8/src/zone-inl.h29
-rw-r--r--deps/v8/src/zone.cc6
-rw-r--r--deps/v8/src/zone.h12
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status8
-rw-r--r--deps/v8/test/cctest/cctest.gyp15
-rw-r--r--deps/v8/test/cctest/cctest.h74
-rw-r--r--deps/v8/test/cctest/cctest.status61
-rw-r--r--deps/v8/test/cctest/test-accessors.cc15
-rw-r--r--deps/v8/test/cctest/test-api.cc1339
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc21
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc10801
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc76
-rw-r--r--deps/v8/test/cctest/test-atomicops.cc276
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc189
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc24
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc33
-rw-r--r--deps/v8/test/cctest/test-compiler.cc5
-rw-r--r--deps/v8/test/cctest/test-constantpool.cc40
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc16
-rw-r--r--deps/v8/test/cctest/test-date.cc22
-rw-r--r--deps/v8/test/cctest/test-debug.cc144
-rw-r--r--deps/v8/test/cctest/test-decls.cc3
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc12
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc1763
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc15
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc110
-rw-r--r--deps/v8/test/cctest/test-fuzz-arm64.cc71
-rw-r--r--deps/v8/test/cctest/test-hashing.cc70
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc167
-rw-r--r--deps/v8/test/cctest/test-heap.cc375
-rw-r--r--deps/v8/test/cctest/test-javascript-arm64.cc266
-rw-r--r--deps/v8/test/cctest/test-js-arm64-variables.cc143
-rw-r--r--deps/v8/test/cctest/test-log.cc4
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-ia32.cc28
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc42
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc140
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc10
-rw-r--r--deps/v8/test/cctest/test-mementos.cc74
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc135
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc194
-rw-r--r--deps/v8/test/cctest/test-parsing.cc737
-rw-r--r--deps/v8/test/cctest/test-platform.cc6
-rw-r--r--deps/v8/test/cctest/test-regexp.cc35
-rw-r--r--deps/v8/test/cctest/test-strings.cc97
-rw-r--r--deps/v8/test/cctest/test-symbols.cc2
-rw-r--r--deps/v8/test/cctest/test-types.cc247
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc425
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h233
-rw-r--r--deps/v8/test/cctest/testcfg.py6
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/message/message.status3
-rw-r--r--deps/v8/test/message/testcfg.py2
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js9
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js29
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/array-reduce.js0
-rw-r--r--deps/v8/test/mjsunit/assert-opt-and-deopt.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim.js (renamed from deps/v8/test/mjsunit/compiler/compare_map_elim.js)0
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-map-elim2.js130
-rw-r--r--deps/v8/test/mjsunit/compiler/compare-objeq-elim.js (renamed from deps/v8/test/mjsunit/compiler/compare_objeq_elim.js)0
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/division-by-constant.js131
-rw-r--r--deps/v8/test/mjsunit/compiler/smi-stores-opt.js49
-rw-r--r--deps/v8/test/mjsunit/compiler/store-elimination.js94
-rw-r--r--deps/v8/test/mjsunit/compiler/to-fast-properties.js43
-rw-r--r--deps/v8/test/mjsunit/constant-fold-control-instructions.js47
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js27
-rw-r--r--deps/v8/test/mjsunit/debug-script.js2
-rw-r--r--deps/v8/test/mjsunit/dehoisted-array-index.js163
-rw-r--r--deps/v8/test/mjsunit/deopt-with-fp-regs.js90
-rw-r--r--deps/v8/test/mjsunit/div-mod.js6
-rw-r--r--deps/v8/test/mjsunit/double-intrinsics.js36
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js113
-rw-r--r--deps/v8/test/mjsunit/es6/math-cbrt.js27
-rw-r--r--deps/v8/test/mjsunit/es6/math-clz32.js36
-rw-r--r--deps/v8/test/mjsunit/es6/math-expm1.js38
-rw-r--r--deps/v8/test/mjsunit/es6/math-fround.js99
-rw-r--r--deps/v8/test/mjsunit/es6/math-hyperbolic.js (renamed from deps/v8/test/mjsunit/harmony/math-hyperbolic.js)8
-rw-r--r--deps/v8/test/mjsunit/es6/math-hypot.js (renamed from deps/v8/test/mjsunit/harmony/math-hypot.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/math-log1p.js41
-rw-r--r--deps/v8/test/mjsunit/es6/math-log2-log10.js (renamed from deps/v8/test/mjsunit/harmony/math-log2-log10.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/math-sign.js (renamed from deps/v8/test/mjsunit/harmony/math-sign.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/math-trunc.js (renamed from deps/v8/test/mjsunit/harmony/math-trunc.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/microtask-delivery.js168
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js (renamed from deps/v8/test/mjsunit/harmony/promises.js)210
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2034.js (renamed from deps/v8/test/mjsunit/regress/regress-2034.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2156.js (renamed from deps/v8/test/mjsunit/regress/regress-2156.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-2829.js (renamed from deps/v8/test/mjsunit/regress/regress-2829.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/weak_collections.js333
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe.js (renamed from deps/v8/test/mjsunit/harmony/object-observe.js)2
-rw-r--r--deps/v8/test/mjsunit/external-array.js1
-rw-r--r--deps/v8/test/mjsunit/function-arguments-duplicate.js36
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part1.js35
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part2.js36
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part3.js35
-rw-r--r--deps/v8/test/mjsunit/fuzz-natives-part4.js35
-rw-r--r--deps/v8/test/mjsunit/getters-on-elements.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-declaration.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/collections.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-objects.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-parsing.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/private.js60
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-example-membrane.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-function.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-173361.js (renamed from deps/v8/test/mjsunit/regress/regress-173361.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2186.js (renamed from deps/v8/test/mjsunit/regress/regress-2186.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2219.js (renamed from deps/v8/test/mjsunit/regress/regress-2219.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2225.js (renamed from deps/v8/test/mjsunit/regress/regress-2225.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2243.js (renamed from deps/v8/test/mjsunit/regress/regress-2243.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2322.js (renamed from deps/v8/test/mjsunit/regress/regress-2322.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2681.js (renamed from deps/v8/test/mjsunit/regress/regress-2681.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2691.js (renamed from deps/v8/test/mjsunit/regress/regress-2691.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-343928.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-248025.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-248025.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-346141.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js (renamed from deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/set-prototype-of.js (renamed from deps/v8/test/mjsunit/set-prototype-of.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/symbols.js153
-rw-r--r--deps/v8/test/mjsunit/invalid-lhs.js52
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div.js11
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status105
-rw-r--r--deps/v8/test/mjsunit/neuter-twice.js9
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/pixel-array-rounding.js0
-rw-r--r--deps/v8/test/mjsunit/proto-accessor.js144
-rw-r--r--deps/v8/test/mjsunit/readonly.js9
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/regexp-capture-3.js0
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/regexp-capture.js0
-rw-r--r--deps/v8/test/mjsunit/regress-3225.js48
-rw-r--r--deps/v8/test/mjsunit/regress-keyed-store-non-strict-arguments.js16
-rw-r--r--deps/v8/test/mjsunit/regress-sync-optimized-lists.js45
-rw-r--r--deps/v8/test/mjsunit/regress/compare-map-elim1.js57
-rw-r--r--deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js47
-rw-r--r--deps/v8/test/mjsunit/regress/number-named-call-deopt.js41
-rw-r--r--deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2273.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2318.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2564.js2
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/regress/regress-3032.js0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3135.js73
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3138.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3158.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3159.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3183.js96
-rw-r--r--deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3204.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3220.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-330046.js (renamed from deps/v8/test/mjsunit/regress-330046.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-333594.js (renamed from deps/v8/test/mjsunit/regress-333594.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-343609.js66
-rw-r--r--deps/v8/test/mjsunit/regress/regress-346587.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347530.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347542.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347906.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347912.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347914.js89
-rw-r--r--deps/v8/test/mjsunit/regress/regress-348280.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-349870.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-349885.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-350865.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-350887.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351261.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351263.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351315.js49
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351319.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-352059.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353551.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-354357.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-354433.js54
-rw-r--r--deps/v8/test/mjsunit/regress/regress-355485.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-355523.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-356053.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-356589.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-357108.js20
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/regress/regress-485.js0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-cr-344285.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-347903.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-349853.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-350434.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-350864.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-350867.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-350890.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-351262.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-351320.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-351658.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-352058.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-352586.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-354391.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-dictionary-to-fast-arguments.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-fast-empty-string.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-force-representation.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-is-smi-repr.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-keyed-store-global.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-migrate-callbacks.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sort-arguments.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-store-global-proxy.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-store-heapobject.js27
-rw-r--r--deps/v8/test/mjsunit/regress/setvalueof-deopt.js42
-rw-r--r--deps/v8/test/mjsunit/regress/string-set-char-deopt.js85
-rw-r--r--deps/v8/test/mjsunit/shift-for-integer-div.js13
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/simple-constructor.js0
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js87
-rw-r--r--deps/v8/test/mjsunit/string-case.js16
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/string-match.js0
-rw-r--r--deps/v8/test/mjsunit/string-oom-array-join.js14
-rw-r--r--deps/v8/test/mjsunit/string-oom-concat.js12
-rw-r--r--deps/v8/test/mjsunit/string-oom-replace-global-regexp-with-string.js26
-rw-r--r--deps/v8/test/mjsunit/string-oom-replace-regexp-global-with-function.js14
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/string-slices.js0
-rw-r--r--[-rwxr-xr-x]deps/v8/test/mjsunit/substr.js0
-rw-r--r--deps/v8/test/mjsunit/test-hidden-string.js11
-rw-r--r--deps/v8/test/mjsunit/third_party/array-isarray.js48
-rw-r--r--deps/v8/test/mjsunit/third_party/array-splice-webkit.js62
-rw-r--r--deps/v8/test/mjsunit/third_party/string-trim.js107
-rw-r--r--deps/v8/test/mjsunit/value-wrapper-accessor.js2
-rw-r--r--deps/v8/test/mjsunit/whitespaces.js115
-rw-r--r--deps/v8/test/mozilla/mozilla.status44
-rw-r--r--deps/v8/test/mozilla/testcfg.py5
-rw-r--r--deps/v8/test/preparser/preparser.status4
-rw-r--r--deps/v8/test/promises-aplus/README29
-rw-r--r--deps/v8/test/promises-aplus/lib/adapter.js41
-rw-r--r--deps/v8/test/promises-aplus/lib/assert.js97
-rw-r--r--deps/v8/test/promises-aplus/lib/global.js76
-rw-r--r--deps/v8/test/promises-aplus/lib/mocha.js264
-rw-r--r--deps/v8/test/promises-aplus/lib/require.js (renamed from deps/v8/test/mjsunit/limit-locals.js)41
-rw-r--r--deps/v8/test/promises-aplus/lib/run-tests.js29
-rw-r--r--deps/v8/test/promises-aplus/promises-aplus.status34
-rw-r--r--deps/v8/test/promises-aplus/testcfg.py148
-rw-r--r--deps/v8/test/test262/README10
-rw-r--r--deps/v8/test/test262/test262.status4
-rw-r--r--deps/v8/test/test262/testcfg.py17
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-already-rejected-expected.txt9
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-already-rejected.js41
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-already-resolved-expected.txt9
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-already-resolved.js43
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-catch-expected.txt15
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-catch.js71
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-chained-then-expected.txt19
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-chained-then.js72
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-exception-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-exception.js43
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-init-callback-receiver-expected.txt9
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-init-callback-receiver.js32
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-init-expected.txt19
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-init.js70
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep.js42
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-onRejected-deep-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-onRejected-deep.js42
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-reject-expected.txt12
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-reject.js53
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-chain-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-chain.js60
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-state-expected.txt12
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-state.js53
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-itself-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-itself.js40
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt11
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception.js43
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill-expected.txt13
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill.js51
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject-expected.txt13
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject.js51
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-resolve.js42
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-simple-expected.txt11
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-simple.js46
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-all-expected.txt32
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-all.js117
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-cast-expected.txt14
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-cast.js56
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-race-expected.txt21
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-race.js108
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-reject-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-reject.js38
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-resolve-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-static-resolve.js43
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then-callback-receiver-expected.txt12
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then-callback-receiver.js47
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then-expected.txt17
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then-without-callbacks-expected.txt9
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then-without-callbacks.js35
-rw-r--r--deps/v8/test/webkit/fast/js/Promise-then.js68
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt24
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt174
-rw-r--r--deps/v8/test/webkit/fast/js/kde/exception_propagation-expected.txt1
-rw-r--r--deps/v8/test/webkit/fast/js/kde/exception_propagation.js20
-rw-r--r--deps/v8/test/webkit/fast/js/modify-non-references-expected.txt12
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt4
-rw-r--r--deps/v8/test/webkit/parser-xml-close-comment-expected.txt6
-rw-r--r--deps/v8/test/webkit/string-trim-expected.txt44
-rw-r--r--deps/v8/test/webkit/toString-prefix-postfix-preserve-parens-expected.txt36
-rw-r--r--deps/v8/test/webkit/toString-prefix-postfix-preserve-parens.js5
-rw-r--r--deps/v8/test/webkit/webkit.status6
-rwxr-xr-xdeps/v8/tools/bash-completion.sh4
-rw-r--r--deps/v8/tools/blink_tests/TestExpectations2
-rwxr-xr-xdeps/v8/tools/cross_build_gcc.sh72
-rwxr-xr-xdeps/v8/tools/draw_instruction_graph.sh130
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua6
-rwxr-xr-xdeps/v8/tools/grokdump.py1151
-rw-r--r--deps/v8/tools/gyp/v8.gyp69
-rw-r--r--deps/v8/tools/lexer-shell.cc42
-rw-r--r--deps/v8/tools/lexer-shell.gyp23
-rwxr-xr-xdeps/v8/tools/merge-to-branch.sh1
-rw-r--r--deps/v8/tools/parser-shell.cc171
-rwxr-xr-xdeps/v8/tools/push-to-trunk.sh412
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_push.py156
-rwxr-xr-xdeps/v8/tools/push-to-trunk/auto_roll.py217
-rwxr-xr-xdeps/v8/tools/push-to-trunk/chromium_roll.py170
-rw-r--r--deps/v8/tools/push-to-trunk/common_includes.py240
-rw-r--r--deps/v8/tools/push-to-trunk/git_recipes.py168
-rwxr-xr-xdeps/v8/tools/push-to-trunk/merge_to_branch.py331
-rwxr-xr-xdeps/v8/tools/push-to-trunk/push_to_trunk.py563
-rwxr-xr-xdeps/v8/tools/push-to-trunk/script_test.py54
-rw-r--r--deps/v8/tools/push-to-trunk/test_scripts.py758
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py4
-rwxr-xr-xdeps/v8/tools/run-tests.py20
-rw-r--r--deps/v8/tools/shell-utils.h67
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py6
-rw-r--r--deps/v8/tools/testrunner/local/utils.py2
-rw-r--r--deps/v8/tools/v8heapconst.py288
689 files changed, 112826 insertions, 22900 deletions
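For reference, a diffstat like the one above can be reproduced locally from a node checkout using the parent and commit hashes in the header; the command below is an illustrative sketch, not part of the original commit page:

    git diff -M --stat f984555d47298cfb01b3e55c2861066379306fc3 \
        67e078094b53861a5aa7e9354e33487d0bd4f73b -- deps/v8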
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index d554ec65d..de51f8a1e 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -22,6 +22,7 @@
*~
.cpplint-cache
.d8_history
+.*.sw?
bsuite
d8
d8_g
@@ -46,7 +47,7 @@ shell_g
/test/mozilla/data
/test/mozilla/downloaded_*
/test/test262/data
-/test/test262/test262-*
+/test/test262/tc39-test262-*
/third_party
/tools/jsfunfuzz
/tools/jsfunfuzz.zip
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 597b0ff0a..4ef2bcca3 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -17,8 +17,10 @@ Opera Software ASA
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
+Alexandre Rames <alexandre.rames@arm.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
+Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
@@ -31,6 +33,7 @@ Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
+Jacob Bramley <jacob.bramley@arm.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
@@ -59,6 +62,7 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
+Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 1d3f139e1..879515d74 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,402 @@
+2014-03-28: Version 3.25.30
+
+ NativeContext::map_cache reference should be strong in heap snapshots
+ (Chromium issue 357060).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-27: Version 3.25.29
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-27: Version 3.25.28
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-26: Version 3.25.27
+
+ Promise constructor should not be enumerable (Chromium issue 352597).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-26: Version 3.25.26
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-25: Version 3.25.25
+
+ Roll ICU 239289:258359 and add support for external ICU data tables
+ (issue 3142, Chromium issue 72633).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-25: Version 3.25.24
+
+ Add support for per-isolate private symbols.
+
+ No longer OOM on invalid string length (issue 3060).
+
+ Remove Failure::OutOfMemory propagation and
+ V8::IgnoreOutOfMemoryException (issue 3060).
+
+ Tentative Windows dll build fix: Don't V8_EXPORT ScriptCompiler::Source
+ (issue 3228).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-24: Version 3.25.23
+
+ Rename A64 port to ARM64 port (Chromium issue 354405).
+
+ Fix missing access check in Runtime_SetPrototype (Chromium issue
+ 354123).
+
+ Fix polymorphic hydrogen handling of SLOPPY_ARGUMENTS_ELEMENTS (Chromium
+ issue 354391).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-20: Version 3.25.22
+
+ Increase the "local variables in a function" limit (issue 3205).
+
+ Implement ES6 symbol registry and predefined symbols.
+
+ Throw exception on invalid string length instead of OOM (Chromium issue
+ 349329).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-20: Version 3.25.21
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-20: Version 3.25.20
+
+ Fix polymorphic keyed loads for SLOPPY_ARGUMENTS_ELEMENTS (Chromium
+ issue 350867).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-19: Version 3.25.19
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-19: Version 3.25.18
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-19: Version 3.25.17
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-18: Version 3.25.16
+
+ Apply numeric casts correctly in typed arrays and related code (Chromium
+ issue 353004).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-18: Version 3.25.15
+
+ Don't generate keyed store ICs for global proxies (Chromium issue
+ 352983).
+
+ MIPS: Make invalid LHSs a parse-time (reference) error (Chromium issue
+ 351658).
+
+ Make invalid LHSs a parse-time (reference) error (Chromium issue
+ 351658).
+
+ Add Promises/A+ Compliance Test Suite (Chromium issue 347095).
+
+ Split Promise API into Promise/Resolver.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-17: Version 3.25.14
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-17: Version 3.25.13
+
+ Move profiler callback interfaces from v8.h to v8-profiler.h.
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-14: Version 3.25.12
+
+ PromiseCoerce should deal with an error during accessing "then"
+ (Chromium issue 347095).
+
+ Propagate updated offsets in BoundsCheckBbData (Chromium issue 350863).
+
+ Add regression test for range analysis bug (issue 3204).
+
+ Continued fix for 351257. Reusing the feedback vector is too complex
+ (Chromium issue 351257).
+
+ StopCpuProfiling should return non-const CpuProfile (issue 3213).
+
+ Allow for compiling with xcode 5.1 (which doesn't have gcc anymore).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-14: Version 3.25.11
+
+ MIPS: Remove uses of CanBeNegative() in HMod (issue 3204).
+
+ MIPS: Remove uses of RangeCanInclude() in flooring division by power of
+ 2 (issue 3204).
+
+ MIPS: Fix uses of range analysis results in HChange (issue 3204).
+
+ Make translation of modulus operation '--stress-opt'-proof (Chromium
+ issue 352059).
+
+ Remove uses of CanBeNegative() in HMod (issue 3204).
+
+ Remove uses of RangeCanInclude() in flooring division by power of 2
+ (issue 3204).
+
+ Fix uses of range analysis results in HChange (issue 3204).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-14: Version 3.25.10
+
+ This version was not committed due to script failures.
+
+
+2014-03-13: Version 3.25.9
+
+ Reland "Enable Object.observe by default" again (issue 2409).
+
+ Use intrinsics for builtin ArrayBuffer property accesses (Chromium issue
+ 351787).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-12: Version 3.25.8
+
+ Fix HIsSmiAndBranch::KnownSuccessorBlock() by deleting it (Chromium
+ issue 351320).
+
+ Fix handling of polymorphic array accesses with constant index (Chromium
+ issue 351319).
+
+ Fix lazy deopt after tagged binary ops (Chromium issue 350434).
+
+ MIPS: Cleanup some of the range uses in ModI/DivI (issue 3204).
+
+ Fix issue with getOwnPropertySymbols and hidden properties (Chromium
+ issue 350864).
+
+ Cleanup some of the range uses in ModI/DivI (issue 3204).
+
+ PromiseCoerce should ignore primitive values (Chromium issue 347095).
+
+ Use a per-isolate cache for the date object JS bits (Chromium issue
+ 348856).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-11: Version 3.25.7
+
+ Promise.all and Promise.race should reject non-array parameter (Chromium
+ issue 347453).
+
+ Promise.all and Promise race should use "then" rather than "chain"
+ (Chromium issue 347427).
+
+ Merge the "Compute Minus Zero Checks" phase into the range analysis
+ (issue 3204).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-10: Version 3.25.6
+
+ Replace the recursion in PropagateMinusZeroChecks() with a loop and a
+ worklist (issue 3204).
+
+ Reland "Enable Object.observe by default" (issue 2409).
+
+ Enable Object.observe by default (issue 2409).
+
+ AllocationTracker now maintains a map from address range to stack trace
+ that allocated the range. When snapshot is generated the map is used to
+ find construction stack trace for an object using its address (Chromium
+ issue 277984).
+
+ Introduce Runtime_GetAllScopesDetails to get all scopes at once for a
+ frame (Chromium issue 340285).
+
+ Reduce heavy runtime calls from debug mirrors (Chromium issue 340285).
+
+ Check and clear date cache in DateCurrentTime, DateLocalTimezone and
+ getTimezoneOffset (Chromium issue 142141).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-06: Version 3.25.5
+
+ Fix HConstants with Smi-ranged HeapNumber values (Chromium issue
+ 349878).
+
+ Fix issues with JSON stringify replacer array (issues 3200, 3201).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-05: Version 3.25.4
+
+ x64: Fix LMathMinMax for constant Smi right-hand operands (Chromium
+ issue 349079).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-03-04: Version 3.25.3
+
+ Clear optimized code cache in shared function info when code gets
+ deoptimized (Chromium issue 343609).
+
+ Fixed constant folding for Math.clz32 (Chromium issue 347906).
+
+ Fix JSObject::PrintTransitions (Chromium issue 347912).
+
+ Fix handling of constant global variable assignments (Chromium issue
+ 347904).
+
+ Removed bogus ASSERT (Chromium issue 347542).
+
+ Mark HCompareMap as having Tagged representation (Chromium issue
+ 346636).
+
+ Fix crasher in Object.getOwnPropertySymbols (Chromium issue 346141).
+
+ Fix the bit massaging code in CompleteParserRecorder::WriteNumber
+ (Chromium issue 346221).
+
+ Don't eliminate loads with incompatible types or representations
+ (Chromium issue 346343).
+
+ Check that after a weak callback, the handle is either dead or strong
+ (Chromium issue 346061).
+
+ Lazy preparsing vs. lazy parsing fix (Chromium issue 346207).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-25: Version 3.25.2
+
+ Fix the bit massaging code in CompleteParserRecorder::WriteNumber
+ (Chromium issue 346221).
+
+ Revert r19455 "Load target types and handlers before IC computation."
+ (Chromium issue 346149).
+
+ Don't eliminate loads with incompatible types or representations
+ (Chromium issue 346343).
+
+ Fix for a smi stores optimization on x64 with a regression test
+ (Chromium issue 345715).
+
+ Check that after a weak callback, the handle is either dead or strong
+ (Chromium issue 346061).
+
+ negative bounds checking on realm calls (Chromium issue 344285).
+
+ Lazy preparsing vs. lazy parsing fix (Chromium issue 346207).
+
+ Fix optimistic BCE to back off after deopt (issue 3176).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-21: Version 3.25.1
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-19: Version 3.25.0
+
+ ES6: Tighten up Object.prototype.__proto__ (issue 3064).
+
+ Fix Hydrogen bounds check elimination (Chromium issue 344186).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-19: Version 3.24.40
+
+ A64: Let the MacroAssembler resolve branches to distant targets (issue
+ 3148).
+
+ Fixed and improved code for integral division. Fixed and extended tests
+ (issue 3151).
+
+ MIPS: Fix assignment of function name constant (issue 3138).
+
+ Fix assignment of function name constant (issue 3138).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-14: Version 3.24.39
+
+ Introduce --job-based-sweeping flag and use individual jobs for sweeping
+ if set (issue 3104).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-13: Version 3.24.38
+
+ Merge experimental/a64 to bleeding_edge (issue 3113).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-12: Version 3.24.37
+
+ Fix spec violations in JSON.stringify wrt replacer array (issue 3135).
+
+ Performance and stability improvements on all platforms.
+
+
+2014-02-11: Version 3.24.36
+
+ Fix inconsistencies wrt whitespaces (issue 3109).
+
+ Performance and stability improvements on all platforms.
+
+
2014-02-10: Version 3.24.35
Fix inconsistencies wrt whitespaces (issue 3109).
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 66d21eb36..353c5c8b3 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,7 +8,7 @@ deps = {
"http://gyp.googlecode.com/svn/trunk@1831",
"v8/third_party/icu":
- "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@239289",
+ "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@258359",
}
deps_os = {
diff --git a/deps/v8/LICENSE b/deps/v8/LICENSE
index 2e516bab6..2f5bce836 100644
--- a/deps/v8/LICENSE
+++ b/deps/v8/LICENSE
@@ -26,7 +26,7 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
-Copyright 2006-2012, the V8 project authors. All rights reserved.
+Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 2f47fa9a8..cdf5d7483 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -136,7 +136,16 @@ endif
# deprecation_warnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
-endif
+endif
+# asan=/path/to/clang++
+ifneq ($(strip $(asan)),)
+ GYPFLAGS += -Dasan=1
+ export CXX="$(asan)"
+ export CXX_host="$(asan)"
+ export LINK="$(asan)"
+ export ASAN_SYMBOLIZER_PATH="$(dir $(asan))llvm-symbolizer"
+endif
+
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
@@ -223,11 +232,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm mipsel
+ARCHES = ia32 x64 arm arm64 mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@@ -247,13 +256,15 @@ NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
+QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
- qc quickcheck \
+ qc quickcheck $(QUICKCHECKS) \
+ $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
@@ -332,6 +343,18 @@ $(CHECKS): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
+$(addsuffix .quickcheck,$(MODES)): $$(basename $$@)
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --mode=$(basename $@) $(TESTFLAGS) --quickcheck
+
+$(addsuffix .quickcheck,$(ARCHES)): $$(basename $$@)
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch=$(basename $@) $(TESTFLAGS) --quickcheck
+
+$(QUICKCHECKS): $$(basename $$@)
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(basename $@) $(TESTFLAGS) --quickcheck
+
$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@tools/android-sync.sh $(basename $@) $(OUTDIR) \
$(shell pwd) $(ANDROID_V8)
@@ -358,12 +381,17 @@ native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
-FASTTESTMODES = ia32.release,x64.release,ia32.optdebug,x64.optdebug,arm.optdebug
+SUPERFASTTESTMODES = ia32.release
+FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
+FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
COMMA = ,
EMPTY =
SPACE = $(EMPTY) $(EMPTY)
-quickcheck: $(subst $(COMMA),$(SPACE),$(FASTTESTMODES))
+quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
+ tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) --quickcheck \
+ --download-data mozilla webkit
tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
qc: quickcheck
@@ -392,7 +420,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
- -Dv8_optimized_debug=$(if $(findstring optdebug,$@),2,0) \
+ $(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
@@ -446,4 +474,4 @@ dependencies:
--revision 1831
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
- third_party/icu --revision 239289
+ third_party/icu --revision 258359
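The quickcheck suffix targets added in this Makefile compose an architecture, a mode, or an arch.mode pair with a reduced test run. Under the ARCHES and MODES lists above, usage would look roughly like this (illustrative invocations, not part of the diff):

    make x64.release.quickcheck   # quick tests for one arch/mode build
    make arm64.quickcheck         # quick tests for every mode of one arch
    make quickcheck               # aggregate target, now built on FASTCOMPILEMODES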
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index fad5fe994..f4e144f28 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -49,24 +49,40 @@ endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
DEFINES += arm_neon=0 arm_version=7
- TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
+ TOOLCHAIN_ARCH = arm-linux-androideabi
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
else
- ifeq ($(ARCH), android_mipsel)
- DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
- DEFINES += mips_arch_variant=mips32r2
- TOOLCHAIN_ARCH = mipsel-linux-android-4.6
+ ifeq ($(ARCH), android_arm64)
+ DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64
+ TOOLCHAIN_ARCH = aarch64-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.8
else
- ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
- TOOLCHAIN_ARCH = x86-4.6
+ ifeq ($(ARCH), android_mipsel)
+ DEFINES = target_arch=mipsel v8_target_arch=mipsel
+ DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
+ TOOLCHAIN_ARCH = mipsel-linux-android
+ TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+ TOOLCHAIN_VER = 4.6
+
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86
+ TOOLCHAIN_PREFIX = i686-linux-android
+ TOOLCHAIN_VER = 4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
endif
-TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
+TOOLCHAIN_PATH = \
+ ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
+
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
@@ -79,23 +95,23 @@ DEFINES += host_os=${HOST_OS}
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
- RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
- CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
- LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
- LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
+ CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
+ RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
+ CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
+ LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+ python -c "print raw_input().capitalize()") \
+ builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
- CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
- CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
+ CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
+ CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index fe15157dd..4f7a96009 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -98,3 +98,12 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
+
+
+def GetPreferredTryMasters(project, change):
+ return {
+ 'tryserver.v8': {
+ 'v8_mac_rel': set(['defaulttests']),
+ 'v8_win_rel': set(['defaulttests']),
+ },
+ }
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index 5fbd8c28e..3860379ea 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -16,6 +16,7 @@
['component!="shared_library"', {
'dependencies': [
'../tools/lexer-shell.gyp:lexer-shell',
+ '../tools/lexer-shell.gyp:parser-shell',
],
}],
]
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index 0ea899d6e..9570f444f 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -184,6 +184,16 @@
'-L<(android_stlport_libs)/x86',
],
}],
+ ['target_arch=="x64"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/x86_64',
+ ],
+ }],
+ ['target_arch=="arm64"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/arm64-v8a',
+ ],
+ }],
],
}],
['target_arch=="ia32"', {
@@ -208,10 +218,19 @@
],
'target_conditions': [
['_type=="executable"', {
+ 'conditions': [
+ ['target_arch=="arm64"', {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker64',
+ ],
+ }, {
+ 'ldflags': [
+ '-Wl,-dynamic-linker,/system/bin/linker',
+ ],
+ }]
+ ],
'ldflags': [
'-Bdynamic',
- '-Wl,-dynamic-linker,/system/bin/linker',
- '-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
@@ -238,8 +257,15 @@
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
- 'cflags': [ '-m32', '-pthread' ],
- 'ldflags': [ '-m32', '-pthread' ],
+ 'conditions': [
+ ['target_arch=="x64"', {
+ 'cflags': [ '-m64', '-pthread' ],
+ 'ldflags': [ '-m64', '-pthread' ],
+ }, {
+ 'cflags': [ '-m32', '-pthread' ],
+ 'ldflags': [ '-m32', '-pthread' ],
+ }],
+ ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index f0e721209..85b8a3846 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -115,7 +115,7 @@
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index bcfce39ad..6ff0170b9 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -34,6 +34,7 @@
'variables': {
'component%': 'static_library',
'clang%': 0,
+ 'asan%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
@@ -52,7 +53,11 @@
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
- s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
+ s/x86_64/x64/;\
+ s/amd64/x64/;\
+ s/aarch64/arm64/;\
+ s/arm.*/arm/;\
+ s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@@ -97,6 +102,7 @@
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
+ (v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
@@ -164,6 +170,22 @@
],
},
'conditions': [
+ ['asan==1', {
+ 'target_defaults': {
+ 'cflags_cc+': [
+ '-fno-omit-frame-pointer',
+ '-gline-tables-only',
+ '-fsanitize=address',
+ '-w', # http://crbug.com/162783
+ ],
+ 'cflags_cc!': [
+ '-fomit-frame-pointer',
+ ],
+ 'ldflags': [
+ '-fsanitize=address',
+ ],
+ },
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'target_defaults': {
@@ -322,6 +344,12 @@
}, {
'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES'},
}],
+ ['clang==1', {
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++11', # -std=gnu++11
+ },
+ }],
],
'target_conditions': [
['_type!="static_library"', {
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index badac2651..4a70d6f7a 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -268,6 +268,11 @@
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
+ ['v8_target_arch=="arm64"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_ARM64',
+ ],
+ }],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
@@ -407,7 +412,8 @@
}],
],
}],
- ['(OS=="linux") and (v8_target_arch=="x64")', {
+ ['(OS=="linux" or OS=="android") and \
+ (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
@@ -422,8 +428,12 @@
'variables': {
'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
- 'cflags': [ '<(m64flag)' ],
- 'ldflags': [ '<(m64flag)' ],
+ 'conditions': [
+ ['((OS!="android" and OS!="qnx") or clang==1)', {
+ 'cflags': [ '<(m64flag)' ],
+ 'ldflags': [ '<(m64flag)' ],
+ }],
+ ],
}]
],
}],
@@ -513,7 +523,8 @@
OS=="qnx"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
- '<(wno_array_bounds)' ],
+ '<(wno_array_bounds)',
+ ],
'conditions': [
['v8_optimized_debug==0', {
'cflags!': [
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 1a86a061e..1a86a061e 100755..100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 46752e968..1691f2973 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -35,6 +35,9 @@
*/
namespace v8 {
+class HeapGraphNode;
+struct HeapStatsUpdate;
+
typedef uint32_t SnapshotObjectId;
/**
@@ -158,12 +161,18 @@ class V8_EXPORT CpuProfiler {
* |record_samples| parameter controls whether individual samples should
* be recorded in addition to the aggregated tree.
*/
+ void StartProfiling(Handle<String> title, bool record_samples = false);
+
+ /** Deprecated. Use StartProfiling instead. */
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
+ CpuProfile* StopProfiling(Handle<String> title);
+
+ /** Deprecated. Use StopProfiling instead. */
const CpuProfile* StopCpuProfiling(Handle<String> title);
/**
@@ -179,9 +188,6 @@ class V8_EXPORT CpuProfiler {
};
-class HeapGraphNode;
-
-
/**
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retainers to retained nodes.
@@ -257,7 +263,11 @@ class V8_EXPORT HeapGraphNode {
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
- int GetSelfSize() const;
+ V8_DEPRECATED("Use GetShallowSize instead",
+ int GetSelfSize() const);
+
+ /** Returns node's own size, in bytes. */
+ size_t GetShallowSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
@@ -268,6 +278,37 @@ class V8_EXPORT HeapGraphNode {
/**
+ * An interface for exporting data from V8, using "push" model.
+ */
+class V8_EXPORT OutputStream { // NOLINT
+ public:
+ enum WriteResult {
+ kContinue = 0,
+ kAbort = 1
+ };
+ virtual ~OutputStream() {}
+ /** Notify about the end of stream. */
+ virtual void EndOfStream() = 0;
+ /** Get preferred output chunk size. Called only once. */
+ virtual int GetChunkSize() { return 1024; }
+ /**
+ * Writes the next chunk of snapshot data into the stream. Writing
+ * can be stopped by returning kAbort as function result. EndOfStream
+ * will not be called in case writing was aborted.
+ */
+ virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
+ /**
+ * Writes the next chunk of heap stats data into the stream. Writing
+ * can be stopped by returning kAbort as function result. EndOfStream
+ * will not be called in case writing was aborted.
+ */
+ virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
+ return kAbort;
+ };
+};
+
+
+/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
class V8_EXPORT HeapSnapshot {
@@ -334,7 +375,24 @@ class V8_EXPORT HeapSnapshot {
};
-class RetainedObjectInfo;
+/**
+ * An interface for reporting progress and controlling long-running
+ * activities.
+ */
+class V8_EXPORT ActivityControl { // NOLINT
+ public:
+ enum ControlOption {
+ kContinue = 0,
+ kAbort = 1
+ };
+ virtual ~ActivityControl() {}
+ /**
+ * Notify about current progress. The activity can be stopped by
+ * returning kAbort as the callback result.
+ */
+ virtual ControlOption ReportProgressValue(int done, int total) = 0;
+};
+
/**
* Interface for controlling heap profiling. Instance of the
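For illustration, a minimal sketch of moving to the renamed profiling entry points declared above (StartProfiling/StopProfiling). It assumes the usual Isolate::GetCpuProfiler() accessor, an entered context, and an already-compiled script; the profile title is invented for the example.

    #include "v8.h"
    #include "v8-profiler.h"

    // Sketch: wrap one script run in a CPU profile using the renamed API.
    void ProfileOneRun(v8::Isolate* isolate, v8::Handle<v8::Script> script) {
      v8::HandleScope scope(isolate);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      v8::Handle<v8::String> title =
          v8::String::NewFromUtf8(isolate, "one-run");  // invented title
      profiler->StartProfiling(title, true);            // also record samples
      script->Run();
      v8::CpuProfile* profile = profiler->StopProfiling(title);
      // Walk profile->GetTopDownRoot() here, then release the profile.
      if (profile != NULL) profile->Delete();
    }
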
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
new file mode 100644
index 000000000..3f8cc6d26
--- /dev/null
+++ b/deps/v8/include/v8-util.h
@@ -0,0 +1,355 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTIL_H_
+#define V8_UTIL_H_
+
+#include "v8.h"
+#include <map>
+
+/**
+ * Support for Persistent containers.
+ *
+ * C++11 embedders can use STL containers with UniquePersistent values,
+ * but pre-C++11 embedders do not have the required move semantics and
+ * hence may want these container classes.
+ */
+namespace v8 {
+
+typedef uintptr_t PersistentContainerValue;
+static const uintptr_t kPersistentContainerNotFound = 0;
+
+
+/**
+ * A default trait implementation for PersistentValueMap which uses std::map
+ * as a backing map.
+ *
+ * Users will have to implement their own weak callbacks & dispose traits.
+ */
+template<typename K, typename V>
+class StdMapTraits {
+ public:
+ // STL map & related:
+ typedef std::map<K, PersistentContainerValue> Impl;
+ typedef typename Impl::iterator Iterator;
+
+ static bool Empty(Impl* impl) { return impl->empty(); }
+ static size_t Size(Impl* impl) { return impl->size(); }
+ static void Swap(Impl& a, Impl& b) { std::swap(a, b); } // NOLINT
+ static Iterator Begin(Impl* impl) { return impl->begin(); }
+ static Iterator End(Impl* impl) { return impl->end(); }
+ static K Key(Iterator it) { return it->first; }
+ static PersistentContainerValue Value(Iterator it) { return it->second; }
+ static PersistentContainerValue Set(Impl* impl, K key,
+ PersistentContainerValue value) {
+ std::pair<Iterator, bool> res = impl->insert(std::make_pair(key, value));
+ PersistentContainerValue old_value = kPersistentContainerNotFound;
+ if (!res.second) {
+ old_value = res.first->second;
+ res.first->second = value;
+ }
+ return old_value;
+ }
+ static PersistentContainerValue Get(Impl* impl, K key) {
+ Iterator it = impl->find(key);
+ if (it == impl->end()) return kPersistentContainerNotFound;
+ return it->second;
+ }
+ static PersistentContainerValue Remove(Impl* impl, K key) {
+ Iterator it = impl->find(key);
+ if (it == impl->end()) return kPersistentContainerNotFound;
+ PersistentContainerValue value = it->second;
+ impl->erase(it);
+ return value;
+ }
+};
+
+
+/**
+ * A default trait implementation for PersistentValueMap, which inherits
+ * a std::map backing map from StdMapTraits and holds non-weak persistent
+ * objects.
+ *
+ * Users have to implement their own dispose trait.
+ */
+template<typename K, typename V>
+class StrongMapTraits : public StdMapTraits<K, V> {
+ public:
+ // Weak callback & friends:
+ static const bool kIsWeak = false;
+ typedef typename StdMapTraits<K, V>::Impl Impl;
+ typedef void WeakCallbackDataType;
+ static WeakCallbackDataType* WeakCallbackParameter(
+ Impl* impl, const K& key, Local<V> value);
+ static Impl* ImplFromWeakCallbackData(
+ const WeakCallbackData<V, WeakCallbackDataType>& data);
+ static K KeyFromWeakCallbackData(
+ const WeakCallbackData<V, WeakCallbackDataType>& data);
+ static void DisposeCallbackData(WeakCallbackDataType* data);
+};
+
+
+/**
+ * A default trait implementation for PersistentValueMap, with a std::map
+ * backing map, non-weak persistents as values, and no special dispose
+ * handling. Can be used as-is.
+ */
+template<typename K, typename V>
+class DefaultPersistentValueMapTraits : public StrongMapTraits<K, V> {
+ public:
+ typedef typename StrongMapTraits<K, V>::Impl Impl;
+ static void Dispose(Isolate* isolate, UniquePersistent<V> value,
+ Impl* impl, K key) { }
+};
+
+
+/**
+ * A map wrapper that allows using UniquePersistent as a mapped value.
+ * C++11 embedders don't need this class, as they can use UniquePersistent
+ * directly in std containers.
+ *
+ * The map relies on a backing map, whose type and accessors are described
+ * by the Traits class. The backing map will handle values of type
+ * PersistentContainerValue, with all conversion into and out of V8
+ * handles being transparently handled by this class.
+ */
+template<typename K, typename V, typename Traits>
+class PersistentValueMap {
+ public:
+ V8_INLINE explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {}
+
+ V8_INLINE ~PersistentValueMap() { Clear(); }
+
+ V8_INLINE Isolate* GetIsolate() { return isolate_; }
+
+ /**
+ * Return size of the map.
+ */
+ V8_INLINE size_t Size() { return Traits::Size(&impl_); }
+
+ /**
+ * Return whether the map holds weak persistents.
+ */
+ V8_INLINE bool IsWeak() { return Traits::kIsWeak; }
+
+ /**
+ * Get value stored in map.
+ */
+ V8_INLINE Local<V> Get(const K& key) {
+ return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key)));
+ }
+
+ /**
+ * Check whether a value is contained in the map.
+ */
+ V8_INLINE bool Contains(const K& key) {
+ return Traits::Get(&impl_, key) != 0;
+ }
+
+ /**
+ * Get value stored in map and set it in returnValue.
+ * Return true if a value was found.
+ */
+ V8_INLINE bool SetReturnValue(const K& key,
+ ReturnValue<Value>& returnValue) {
+ PersistentContainerValue value = Traits::Get(&impl_, key);
+ bool hasValue = value != 0;
+ if (hasValue) {
+ returnValue.SetInternal(
+ *reinterpret_cast<internal::Object**>(FromVal(value)));
+ }
+ return hasValue;
+ }
+
+ /**
+ * Call Isolate::SetReference with the given parent and the map value.
+ */
+ V8_INLINE void SetReference(const K& key,
+ const Persistent<Object>& parent) {
+ GetIsolate()->SetReference(
+ reinterpret_cast<internal::Object**>(parent.val_),
+ reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))));
+ }
+
+ /**
+ * Put value into map. Depending on Traits::kIsWeak, the value will be held
+ * by the map strongly or weakly.
+ * Returns old value as UniquePersistent.
+ */
+ UniquePersistent<V> Set(const K& key, Local<V> value) {
+ UniquePersistent<V> persistent(isolate_, value);
+ return SetUnique(key, &persistent);
+ }
+
+ /**
+ * Put value into map, like Set(const K&, Local<V>).
+ */
+ UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
+ return SetUnique(key, &value);
+ }
+
+ /**
+ * Return value for key and remove it from the map.
+ */
+ V8_INLINE UniquePersistent<V> Remove(const K& key) {
+ return Release(Traits::Remove(&impl_, key)).Pass();
+ }
+
+ /**
+ * Traverses the map repeatedly,
+ * in case side effects of disposal cause insertions.
+ **/
+ void Clear() {
+ typedef typename Traits::Iterator It;
+ HandleScope handle_scope(isolate_);
+ // TODO(dcarney): figure out if this swap and loop is necessary.
+ while (!Traits::Empty(&impl_)) {
+ typename Traits::Impl impl;
+ Traits::Swap(impl_, impl);
+ for (It i = Traits::Begin(&impl); i != Traits::End(&impl); ++i) {
+ Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(), &impl,
+ Traits::Key(i));
+ }
+ }
+ }
+
+ private:
+ PersistentValueMap(PersistentValueMap&);
+ void operator=(PersistentValueMap&);
+
+ /**
+ * Put the value into the map, and set the 'weak' callback when demanded
+ * by the Traits class.
+ */
+ UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
+ if (Traits::kIsWeak) {
+ Local<V> value(Local<V>::New(isolate_, *persistent));
+ persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
+ Traits::WeakCallbackParameter(&impl_, key, value), WeakCallback);
+ }
+ PersistentContainerValue old_value =
+ Traits::Set(&impl_, key, ClearAndLeak(persistent));
+ return Release(old_value).Pass();
+ }
+
+ static void WeakCallback(
+ const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
+ if (Traits::kIsWeak) {
+ typename Traits::Impl* impl = Traits::ImplFromWeakCallbackData(data);
+ K key = Traits::KeyFromWeakCallbackData(data);
+ PersistentContainerValue value = Traits::Remove(impl, key);
+ Traits::Dispose(data.GetIsolate(), Release(value).Pass(), impl, key);
+ }
+ }
+
+ V8_INLINE static V* FromVal(PersistentContainerValue v) {
+ return reinterpret_cast<V*>(v);
+ }
+
+ V8_INLINE static PersistentContainerValue ClearAndLeak(
+ UniquePersistent<V>* persistent) {
+ V* v = persistent->val_;
+ persistent->val_ = 0;
+ return reinterpret_cast<PersistentContainerValue>(v);
+ }
+
+ /**
+ * Return a container value as UniquePersistent and make sure the weak
+ * callback is properly disposed of. All remove functionality should go
+ * through this.
+ */
+ V8_INLINE static UniquePersistent<V> Release(PersistentContainerValue v) {
+ UniquePersistent<V> p;
+ p.val_ = FromVal(v);
+ if (Traits::kIsWeak && !p.IsEmpty()) {
+ Traits::DisposeCallbackData(
+ p.template ClearWeak<typename Traits::WeakCallbackDataType>());
+ }
+ return p.Pass();
+ }
+
+ Isolate* isolate_;
+ typename Traits::Impl impl_;
+};
+
+
+/**
+ * A map that uses UniquePersistent as value and std::map as the backing
+ * implementation. Persistents are held non-weak.
+ *
+ * C++11 embedders don't need this class, as they can use
+ * UniquePersistent directly in std containers.
+ */
+template<typename K, typename V,
+ typename Traits = DefaultPersistentValueMapTraits<K, V> >
+class StdPersistentValueMap : public PersistentValueMap<K, V, Traits> {
+ public:
+ explicit StdPersistentValueMap(Isolate* isolate)
+ : PersistentValueMap<K, V, Traits>(isolate) {}
+};
+
+
+/**
+ * Empty default implementations for StrongMapTraits methods.
+ *
+ * These should not be necessary, since they're only used in code that
+ * is surrounded by if(Traits::kIsWeak), which for StrongMapTraits is
+ * compile-time false. Most compilers can live without them; however
+ * the compiler we use from 64-bit Win differs.
+ *
+ * TODO(vogelheim): Remove these once they're no longer necessary.
+ */
+template<typename K, typename V>
+typename StrongMapTraits<K, V>::WeakCallbackDataType*
+ StrongMapTraits<K, V>::WeakCallbackParameter(
+ Impl* impl, const K& key, Local<V> value) {
+ return NULL;
+}
+
+
+template<typename K, typename V>
+typename StrongMapTraits<K, V>::Impl*
+ StrongMapTraits<K, V>::ImplFromWeakCallbackData(
+ const WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return NULL;
+}
+
+
+template<typename K, typename V>
+K StrongMapTraits<K, V>::KeyFromWeakCallbackData(
+ const WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return K();
+}
+
+
+template<typename K, typename V>
+void StrongMapTraits<K, V>::DisposeCallbackData(WeakCallbackDataType* data) {
+}
+
+} // namespace v8
+
+#endif // V8_UTIL_H_
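
For illustration, a minimal usage sketch of the StdPersistentValueMap added above, assuming a live Isolate with an entered context; the key string is invented for the example.

    #include <string>
    #include "v8.h"
    #include "v8-util.h"

    // Sketch: cache a wrapper object under a string key with the default
    // (strong, std::map-backed) traits.
    void CacheDemo(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::StdPersistentValueMap<std::string, v8::Object> cache(isolate);

      cache.Set("wrapper", v8::Object::New(isolate));      // stored strongly
      if (cache.Contains("wrapper")) {
        v8::Local<v8::Object> obj = cache.Get("wrapper");  // fresh Local
        (void)obj;
      }
      // Remove() hands the UniquePersistent back to the caller; letting it
      // go out of scope drops the last strong reference.
      v8::UniquePersistent<v8::Object> released = cache.Remove("wrapper");
      (void)released;
    }
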
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index fe3b02041..608e3c52c 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -108,6 +108,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class RawOperationDescriptor;
+class Script;
class Signature;
class StackFrame;
class StackTrace;
@@ -127,10 +128,12 @@ template<class T> class PersistentBase;
template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
template<class T> class UniquePersistent;
+template<class K, class V, class T> class PersistentValueMap;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
class ObjectTemplate;
class Data;
+template<typename T> class FunctionCallbackInfo;
template<typename T> class PropertyCallbackInfo;
class StackTrace;
class StackFrame;
@@ -140,6 +143,7 @@ class ObjectOperationDescriptor;
class RawOperationDescriptor;
class CallHandlerHelper;
class EscapableHandleScope;
+template<typename T> class ReturnValue;
namespace internal {
class Arguments;
@@ -412,6 +416,7 @@ template <class T> class Local : public Handle<T> {
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
friend class EscapableHandleScope;
+ template<class F1, class F2, class F3> friend class PersistentValueMap;
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
};
@@ -527,7 +532,11 @@ template <class T> class PersistentBase {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
- V8_INLINE void ClearWeak();
+ template<typename P>
+ V8_INLINE P* ClearWeak();
+
+ // TODO(dcarney): remove this.
+ V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -576,6 +585,8 @@ template <class T> class PersistentBase {
template<class F> friend class UniquePersistent;
template<class F> friend class PersistentBase;
template<class F> friend class ReturnValue;
+ template<class F1, class F2, class F3> friend class PersistentValueMap;
+ friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
PersistentBase(PersistentBase& other); // NOLINT
@@ -743,7 +754,7 @@ class UniquePersistent : public PersistentBase<T> {
};
public:
- /**
+ /**
* A UniquePersistent with no storage cell.
*/
V8_INLINE UniquePersistent() : PersistentBase<T>(0) { }
@@ -781,6 +792,7 @@ class UniquePersistent : public PersistentBase<T> {
template<class S>
V8_INLINE UniquePersistent& operator=(UniquePersistent<S> rhs) {
TYPE_CHECK(T, S);
+ this->Reset();
this->val_ = rhs.val_;
rhs.val_ = 0;
return *this;
@@ -998,114 +1010,188 @@ class ScriptOrigin {
/**
- * A compiled JavaScript script.
+ * A compiled JavaScript script, not yet tied to a Context.
*/
-class V8_EXPORT Script {
+class V8_EXPORT UnboundScript {
public:
/**
- * Compiles the specified script (context-independent).
- *
- * \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
- * when New() returns
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
- * using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when New() returns.
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but allows data to be
- * available to compile event handlers.
- * \return Compiled script object (context independent; when run it
- * will use the currently entered context).
+ * Binds the script to the currently entered context.
*/
- static Local<Script> New(Handle<String> source,
- ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL,
- Handle<String> script_data = Handle<String>());
+ Local<Script> BindToCurrentContext();
+
+ int GetId();
+ Handle<Value> GetScriptName();
/**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * \param source Script source code.
- * \param file_name file name object (typically a string) to be used
- * as the script's origin.
- * \return Compiled script object (context independent; when run it
- * will use the currently entered context).
+ * Returns the zero-based line number of the code_pos location in the script.
+ * -1 will be returned if no information is available.
*/
- static Local<Script> New(Handle<String> source,
- Handle<Value> file_name);
+ int GetLineNumber(int code_pos);
+
+ static const int kNoScriptId = 0;
+};
+
+/**
+ * A compiled JavaScript script, tied to a Context which was active when the
+ * script was compiled.
+ */
+class V8_EXPORT Script {
+ public:
/**
- * Compiles the specified script (bound to current context).
- *
- * \param source Script source code.
- * \param origin Script origin, owned by caller, no references are kept
- * when Compile() returns
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
- * using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when Compile() returns.
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but makes data available
- * earlier (i.e. to compile event handlers).
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
+ * A shorthand for ScriptCompiler::Compile().
+ * The ScriptData parameter will be deprecated; use ScriptCompiler::Compile if
+ * you want to pass it.
*/
static Local<Script> Compile(Handle<String> source,
ScriptOrigin* origin = NULL,
- ScriptData* pre_data = NULL,
- Handle<String> script_data = Handle<String>());
+ ScriptData* script_data = NULL);
- /**
- * Compiles the specified script using the specified file name
- * object (typically a string) as the script's origin.
- *
- * \param source Script source code.
- * \param file_name File name to use as script's origin
- * \param script_data Arbitrary data associated with script. Using
- * this has same effect as calling SetData(), but makes data available
- * earlier (i.e. to compile event handlers).
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
- */
+ // To be deprecated, use the Compile above.
static Local<Script> Compile(Handle<String> source,
- Handle<Value> file_name,
- Handle<String> script_data = Handle<String>());
+ Handle<String> file_name);
/**
- * Runs the script returning the resulting value. If the script is
- * context independent (created using ::New) it will be run in the
- * currently entered context. If it is context specific (created
- * using ::Compile) it will be run in the context in which it was
- * compiled.
+ * Runs the script returning the resulting value. It will be run in the
+ * context in which it was created (ScriptCompiler::Compile() or
+ * UnboundScript::BindToCurrentContext()).
*/
Local<Value> Run();
/**
- * Returns the script id.
+ * Returns the corresponding context-unbound script.
*/
- int GetId();
+ Local<UnboundScript> GetUnboundScript();
+
+ // To be deprecated; use GetUnboundScript()->GetId();
+ int GetId() {
+ return GetUnboundScript()->GetId();
+ }
+
+ // Use GetUnboundScript()->GetScriptName();
+ V8_DEPRECATED("Use GetUnboundScript()->GetScriptName()",
+ Handle<Value> GetScriptName()) {
+ return GetUnboundScript()->GetScriptName();
+ }
/**
- * Associate an additional data object with the script. This is mainly used
- * with the debugger as this data object is only available through the
- * debugger API.
+ * Returns the zero-based line number of the code_pos location in the script.
+ * -1 will be returned if no information is available.
*/
- void SetData(Handle<String> data);
+ V8_DEPRECATED("Use GetUnboundScript()->GetLineNumber()",
+ int GetLineNumber(int code_pos)) {
+ return GetUnboundScript()->GetLineNumber(code_pos);
+ }
+};
+
+/**
+ * For compiling scripts.
+ */
+class V8_EXPORT ScriptCompiler {
+ public:
/**
- * Returns the name value of one Script.
+ * Compilation data that the embedder can cache and pass back to speed up
+ * future compilations. The data is produced if the CompileOptions passed to
+ * the compilation functions in ScriptCompiler include kProduceDataToCache.
+ * The data to cache can then be retrieved from
+ * UnboundScript.
+ */
+ struct V8_EXPORT CachedData {
+ enum BufferPolicy {
+ BufferNotOwned,
+ BufferOwned
+ };
+
+ CachedData() : data(NULL), length(0), buffer_policy(BufferNotOwned) {}
+
+ // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
+ // data and guarantees that it stays alive until the CachedData object is
+ // destroyed. If the policy is BufferOwned, the given data will be deleted
+ // (with delete[]) when the CachedData object is destroyed.
+ CachedData(const uint8_t* data, int length,
+ BufferPolicy buffer_policy = BufferNotOwned);
+ ~CachedData();
+ // TODO(marja): Async compilation; add constructors which take a callback
+ // which will be called when V8 no longer needs the data.
+ const uint8_t* data;
+ int length;
+ BufferPolicy buffer_policy;
+
+ private:
+ // Prevent copying. Not implemented.
+ CachedData(const CachedData&);
+ CachedData& operator=(const CachedData&);
+ };
+
+ /**
+ * Source code which can then be compiled to an UnboundScript or
+ * BoundScript.
*/
- Handle<Value> GetScriptName();
+ class Source {
+ public:
+ // Source takes ownership of CachedData.
+ V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
+ CachedData* cached_data = NULL);
+ V8_INLINE Source(Local<String> source_string,
+ CachedData* cached_data = NULL);
+ V8_INLINE ~Source();
+
+ // Ownership of the CachedData or its buffers is *not* transferred to the
+ // caller. The CachedData object is alive as long as the Source object is
+ // alive.
+ V8_INLINE const CachedData* GetCachedData() const;
+
+ private:
+ friend class ScriptCompiler;
+ // Prevent copying. Not implemented.
+ Source(const Source&);
+ Source& operator=(const Source&);
+
+ Local<String> source_string;
+
+ // Origin information
+ Handle<Value> resource_name;
+ Handle<Integer> resource_line_offset;
+ Handle<Integer> resource_column_offset;
+ Handle<Boolean> resource_is_shared_cross_origin;
+
+ // Cached data from previous compilation (if any), or generated during
+ // compilation (if the generate_cached_data flag is passed to
+ // ScriptCompiler).
+ CachedData* cached_data;
+ };
+
+ enum CompileOptions {
+ kNoCompileOptions,
+ kProduceDataToCache = 1 << 0
+ };
/**
- * Returns zero based line number of the code_pos location in the script.
- * -1 will be returned if no information available.
+ * Compiles the specified script (context-independent).
+ *
+ * \param source Script source code.
+ * \return Compiled script object (context independent; for running it must be
+ * bound to a context).
*/
- int GetLineNumber(int code_pos);
+ static Local<UnboundScript> CompileUnbound(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions);
- static const int kNoScriptId = 0;
+ /**
+ * Compiles the specified script (bound to current context).
+ *
+ * \param source Script source code.
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when this function returns.
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
+ */
+ static Local<Script> Compile(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions);
};
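
For illustration, a sketch of the split compile path declared above: build a ScriptCompiler::Source, compile it context-independently, then bind and run it in the currently entered context. The function and parameter names are invented for the example, and error handling (empty handles) is omitted.

    #include "v8.h"

    // Sketch: compile once without a context, bind to the entered one, run.
    v8::Local<v8::Value> CompileAndRun(v8::Isolate* isolate,
                                       v8::Local<v8::String> source_text,
                                       v8::Local<v8::String> name) {
      v8::EscapableHandleScope scope(isolate);
      v8::ScriptOrigin origin(name);
      v8::ScriptCompiler::Source source(source_text, origin);
      v8::Local<v8::UnboundScript> unbound =
          v8::ScriptCompiler::CompileUnbound(isolate, &source);
      v8::Local<v8::Script> script = unbound->BindToCurrentContext();
      return scope.Escape(script->Run());
    }
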
@@ -1430,6 +1516,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsRegExp() const;
+ /**
+ * Returns true if this value is a Promise.
+ * This is an experimental feature.
+ */
+ bool IsPromise() const;
/**
* Returns true if this value is an ArrayBuffer.
@@ -1911,9 +2002,20 @@ class V8_EXPORT Symbol : public Primitive {
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
- // Create a symbol. If data is not NULL, it will be used as a print name.
+ // Create a symbol. If name is not empty, it will be used as the description.
static Local<Symbol> New(
- Isolate *isolate, const char* data = NULL, int length = -1);
+ Isolate *isolate, Local<String> name = Local<String>());
+
+ // Access global symbol registry.
+ // Note that symbols created this way are never collected, so
+ // they should only be used for statically fixed properties.
+ // Also, there is only one global name space for the names used as keys.
+ // To minimize the potential for clashes, use qualified names as keys.
+ static Local<Symbol> For(Isolate *isolate, Local<String> name);
+
+ // Retrieve a global symbol. Similar to |For|, but using a separate
+ // registry that is not accessible by (and cannot clash with) JavaScript code.
+ static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
V8_INLINE static Symbol* Cast(v8::Value* obj);
private:
@@ -1932,9 +2034,18 @@ class V8_EXPORT Private : public Data {
// Returns the print name string of the private symbol, or undefined if none.
Local<Value> Name() const;
- // Create a private symbol. If data is not NULL, it will be the print name.
+ // Create a private symbol. If name is not empty, it will be the description.
static Local<Private> New(
- Isolate *isolate, const char* data = NULL, int length = -1);
+ Isolate *isolate, Local<String> name = Local<String>());
+
+ // Retrieve a global private symbol. If a symbol with this name has not
+ // been retrieved in the same isolate before, it is created.
+ // Note that private symbols created this way are never collected, so
+ // they should only be used for statically fixed properties.
+ // Also, there is only one global name space for the names used as keys.
+ // To minimize the potential for clashes, use qualified names as keys,
+ // e.g., "Class#property".
+ static Local<Private> ForApi(Isolate *isolate, Local<String> name);
private:
Private();
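
For illustration, a small sketch of the registry lookups declared above; the qualified key string is invented for the example.

    #include "v8.h"

    // Sketch: the same qualified key always yields the same registered symbol.
    void RegistryDemo(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::String> key =
          v8::String::NewFromUtf8(isolate, "mylib#cache");  // invented key
      v8::Local<v8::Symbol> a = v8::Symbol::For(isolate, key);
      v8::Local<v8::Symbol> b = v8::Symbol::For(isolate, key);  // same as a
      // Private::ForApi works the same way, but its registry is invisible
      // to JavaScript code.
      v8::Local<v8::Private> p = v8::Private::ForApi(isolate, key);
      (void)a; (void)b; (void)p;
    }
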
@@ -2118,6 +2229,12 @@ class V8_EXPORT Object : public Value {
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
+ void SetAccessorProperty(Local<String> name,
+ Local<Function> getter,
+ Handle<Function> setter = Handle<Function>(),
+ PropertyAttribute attribute = None,
+ AccessControl settings = DEFAULT);
+
/**
* Functionality for private properties.
* This is an experimental feature, use at your own risk.
@@ -2185,6 +2302,12 @@ class V8_EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
+ /** Same as above, but works for Persistents */
+ V8_INLINE static int InternalFieldCount(
+ const PersistentBase<Object>& object) {
+ return object.val_->InternalFieldCount();
+ }
+
/** Gets the value from an internal field. */
V8_INLINE Local<Value> GetInternalField(int index);
@@ -2198,6 +2321,12 @@ class V8_EXPORT Object : public Value {
*/
V8_INLINE void* GetAlignedPointerFromInternalField(int index);
+ /** Same as above, but works for Persistents */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const PersistentBase<Object>& object, int index) {
+ return object.val_->GetAlignedPointerFromInternalField(index);
+ }
+
/**
* Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
* a field, GetAlignedPointerFromInternalField must be used, everything else
@@ -2389,6 +2518,8 @@ class ReturnValue {
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
+ template<class F, class G, class H> friend class PersistentValueMap;
+ V8_INLINE void SetInternal(internal::Object* value) { *value_ = value; }
V8_INLINE internal::Object* GetDefaultValue();
V8_INLINE explicit ReturnValue(internal::Object** slot);
internal::Object** value_;
@@ -2542,6 +2673,56 @@ class V8_EXPORT Function : public Object {
static void CheckCast(Value* obj);
};
+
+/**
+ * An instance of the built-in Promise constructor (ES6 draft).
+ * This API is experimental. Only works with --harmony flag.
+ */
+class V8_EXPORT Promise : public Object {
+ public:
+ class V8_EXPORT Resolver : public Object {
+ public:
+ /**
+ * Create a new resolver, along with an associated promise in pending state.
+ */
+ static Local<Resolver> New(Isolate* isolate);
+
+ /**
+ * Extract the associated promise.
+ */
+ Local<Promise> GetPromise();
+
+ /**
+ * Resolve/reject the associated promise with a given value.
+ * Ignored if the promise is no longer pending.
+ */
+ void Resolve(Handle<Value> value);
+ void Reject(Handle<Value> value);
+
+ V8_INLINE static Resolver* Cast(Value* obj);
+
+ private:
+ Resolver();
+ static void CheckCast(Value* obj);
+ };
+
+ /**
+ * Register a resolution/rejection handler with a promise.
+ * The handler is given the respective resolution/rejection value as
+ * an argument. If the promise is already resolved/rejected, the handler is
+ * invoked at the end of turn.
+ */
+ Local<Promise> Chain(Handle<Function> handler);
+ Local<Promise> Catch(Handle<Function> handler);
+
+ V8_INLINE static Promise* Cast(Value* obj);
+
+ private:
+ Promise();
+ static void CheckCast(Value* obj);
+};
+
+
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
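
For illustration, a sketch of the Resolver/Promise pair declared above (experimental, --harmony only per the comment); the handler function is assumed to be supplied by the embedder.

    #include "v8.h"

    // Sketch: create a pending promise, attach a handler, then resolve it.
    void PromiseDemo(v8::Isolate* isolate, v8::Handle<v8::Function> on_value) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Promise::Resolver> resolver =
          v8::Promise::Resolver::New(isolate);
      v8::Local<v8::Promise> promise = resolver->GetPromise();
      promise->Chain(on_value);                     // sees the resolved value
      resolver->Resolve(v8::Integer::New(isolate, 42));
      // The handler runs from the microtask queue (see the microtask
      // additions further down in this header).
    }
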
@@ -3805,6 +3986,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
+// --- Tracing ---
+
+typedef void (*LogEventCallback)(const char* name, int event);
/**
* Create new error objects by calling the corresponding error object
@@ -3959,6 +4143,46 @@ class V8_EXPORT Isolate {
Scope& operator=(const Scope&);
};
+
+ /**
+ * Assert that no Javascript code is invoked.
+ */
+ class DisallowJavascriptExecutionScope {
+ public:
+ enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE };
+
+ DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
+ ~DisallowJavascriptExecutionScope();
+
+ private:
+ bool on_failure_;
+ void* internal_;
+
+ // Prevent copying of Scope objects.
+ DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&);
+ DisallowJavascriptExecutionScope& operator=(
+ const DisallowJavascriptExecutionScope&);
+ };
+
+
+ /**
+ * Introduces an exception to DisallowJavascriptExecutionScope.
+ */
+ class AllowJavascriptExecutionScope {
+ public:
+ explicit AllowJavascriptExecutionScope(Isolate* isolate);
+ ~AllowJavascriptExecutionScope();
+
+ private:
+ void* internal_throws_;
+ void* internal_assert_;
+
+ // Prevent copying of Scope objects.
+ AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&);
+ AllowJavascriptExecutionScope& operator=(
+ const AllowJavascriptExecutionScope&);
+ };
+
/**
* Types of garbage collections that can be requested via
* RequestGarbageCollectionForTesting.
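
For illustration, a sketch of nesting the two execution scopes declared earlier in this hunk; the notify callback is an invented placeholder.

    #include "v8.h"

    // Sketch: forbid script re-entry in a critical section, except for one
    // explicitly allowed callback.
    void CriticalSection(v8::Isolate* isolate, v8::Handle<v8::Function> notify) {
      v8::HandleScope scope(isolate);
      v8::Isolate::DisallowJavascriptExecutionScope no_js(
          isolate,
          v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
      // ... native bookkeeping that must not call back into script ...
      {
        v8::Isolate::AllowJavascriptExecutionScope allow_js(isolate);
        notify->Call(isolate->GetCurrentContext()->Global(), 0, NULL);
      }
    }
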
@@ -4127,13 +4351,12 @@ class V8_EXPORT Isolate {
/**
* Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it
+ * triggers a garbage collection, the callback will not be called again.
+ * It is possible to specify the GCType filter for your callback. But it is
+ * not possible to register the same callback function two times with
+ * different GCType filters.
*/
void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
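
For illustration, a sketch of registering the per-isolate prologue hook described above; it assumes the Isolate-level callback signature (Isolate*, GCType, GCCallbackFlags), and the counter is an invented placeholder.

    #include "v8.h"

    // Sketch: per the comment above, the callback may allocate but is not
    // re-entered if that allocation triggers another collection.
    static void OnGcStart(v8::Isolate* isolate, v8::GCType type,
                          v8::GCCallbackFlags flags) {
      // e.g. bump an embedder-side GC counter here
    }

    void InstallGcHook(v8::Isolate* isolate) {
      isolate->AddGCPrologueCallback(OnGcStart, v8::kGCTypeAll);
    }
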
@@ -4146,13 +4369,12 @@ class V8_EXPORT Isolate {
/**
* Enables the host application to receive a notification after a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects. It is possible
- * to specify the GCType filter for your callback. But it is not possible to
- * register the same callback function two times with different
- * GCType filters.
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it
+ * triggers a garbage collection, the callback will not be called again.
+ * It is possible to specify the GCType filter for your callback. But it is
+ * not possible to register the same callback function two times with
+ * different GCType filters.
*/
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
@@ -4191,7 +4413,14 @@ class V8_EXPORT Isolate {
*/
void RequestGarbageCollectionForTesting(GarbageCollectionType type);
+ /**
+ * Set the callback to invoke for logging event.
+ */
+ void SetEventLogger(LogEventCallback that);
+
private:
+ template<class K, class V, class Traits> friend class PersistentValueMap;
+
Isolate();
Isolate(const Isolate&);
~Isolate();
@@ -4411,20 +4640,6 @@ class V8_EXPORT V8 {
static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
/**
- * Ignore out-of-memory exceptions.
- *
- * V8 running out of memory is treated as a fatal error by default.
- * This means that the fatal error handler is called and that V8 is
- * terminated.
- *
- * IgnoreOutOfMemoryException can be used to not treat an
- * out-of-memory situation as a fatal error. This way, the contexts
- * that did not cause the out of memory problem might be able to
- * continue execution.
- */
- static void IgnoreOutOfMemoryException();
-
- /**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
@@ -4579,6 +4794,22 @@ class V8_EXPORT V8 {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
/**
+ * Experimental: Runs the Microtask Work Queue until empty
+ */
+ static void RunMicrotasks(Isolate* isolate);
+
+ /**
+ * Experimental: Enqueues the callback to the Microtask Work Queue
+ */
+ static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);
+
+ /**
+ * Experimental: Controls whether the Microtask Work Queue is automatically
+ * run when the script call depth decrements to zero.
+ */
+ static void SetAutorunMicrotasks(Isolate *source, bool autorun);
+
+ /**
* Initializes from snapshot if possible. Otherwise, attempts to
* initialize from scratch. This function is called implicitly if
* you use the API without calling it first.
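
For illustration, a sketch of the microtask hooks declared above; the task function is assumed to come from the embedder.

    #include "v8.h"

    // Sketch: take over microtask scheduling and drain the queue by hand.
    void MicrotaskDemo(v8::Isolate* isolate, v8::Handle<v8::Function> task) {
      v8::V8::SetAutorunMicrotasks(isolate, false);  // stop automatic runs
      v8::V8::EnqueueMicrotask(isolate, task);
      // ... run whatever script work belongs to this turn ...
      v8::V8::RunMicrotasks(isolate);                // flush the queue now
    }
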
@@ -4744,8 +4975,11 @@ class V8_EXPORT V8 {
/**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
+ *
+ * If V8 was compiled with the ICU data in an external file, the location
+ * of the data file has to be provided.
*/
- static bool InitializeICU();
+ static bool InitializeICU(const char* icu_data_file = NULL);
/**
* Sets the v8::Platform to use. This should be invoked before V8 is
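
For illustration, a one-line sketch of the new InitializeICU parameter above for builds that keep the ICU data in an external file; the path is invented for the example.

    #include "v8.h"

    // Sketch: point the bundled-ICU initializer at an external data file.
    bool BootIcu() {
      return v8::V8::InitializeICU("/opt/myapp/icudtl.dat");
    }
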
@@ -4770,7 +5004,7 @@ class V8_EXPORT V8 {
static void MakeWeak(internal::Object** global_handle,
void* data,
WeakCallback weak_callback);
- static void ClearWeak(internal::Object** global_handle);
+ static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
int* index);
@@ -5013,7 +5247,7 @@ class V8_EXPORT Context {
void Exit();
/** Returns true if the context has experienced an out of memory situation. */
- bool HasOutOfMemoryException();
+ bool HasOutOfMemoryException() { return false; }
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@@ -5227,67 +5461,6 @@ class V8_EXPORT Locker {
};
-/**
- * A struct for exporting HeapStats data from V8, using "push" model.
- */
-struct HeapStatsUpdate;
-
-
-/**
- * An interface for exporting data from V8, using "push" model.
- */
-class V8_EXPORT OutputStream { // NOLINT
- public:
- enum OutputEncoding {
- kAscii = 0 // 7-bit ASCII.
- };
- enum WriteResult {
- kContinue = 0,
- kAbort = 1
- };
- virtual ~OutputStream() {}
- /** Notify about the end of stream. */
- virtual void EndOfStream() = 0;
- /** Get preferred output chunk size. Called only once. */
- virtual int GetChunkSize() { return 1024; }
- /** Get preferred output encoding. Called only once. */
- virtual OutputEncoding GetOutputEncoding() { return kAscii; }
- /**
- * Writes the next chunk of snapshot data into the stream. Writing
- * can be stopped by returning kAbort as function result. EndOfStream
- * will not be called in case writing was aborted.
- */
- virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
- /**
- * Writes the next chunk of heap stats data into the stream. Writing
- * can be stopped by returning kAbort as function result. EndOfStream
- * will not be called in case writing was aborted.
- */
- virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
- return kAbort;
- };
-};
-
-
-/**
- * An interface for reporting progress and controlling long-running
- * activities.
- */
-class V8_EXPORT ActivityControl { // NOLINT
- public:
- enum ControlOption {
- kContinue = 0,
- kAbort = 1
- };
- virtual ~ActivityControl() {}
- /**
- * Notify about current progress. The activity can be stopped by
- * returning kAbort as the callback result.
- */
- virtual ControlOption ReportProgressValue(int done, int total) = 0;
-};
-
-
// --- Implementation ---
@@ -5398,7 +5571,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 147;
+ static const int kEmptyStringRootIndex = 154;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5683,8 +5856,10 @@ void PersistentBase<T>::SetWeak(
template <class T>
-void PersistentBase<T>::ClearWeak() {
- V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
+template<typename P>
+P* PersistentBase<T>::ClearWeak() {
+ return reinterpret_cast<P*>(
+ V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
}
@@ -5925,6 +6100,32 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
}
+ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
+ CachedData* data)
+ : source_string(string),
+ resource_name(origin.ResourceName()),
+ resource_line_offset(origin.ResourceLineOffset()),
+ resource_column_offset(origin.ResourceColumnOffset()),
+ resource_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin()),
+ cached_data(data) {}
+
+
+ScriptCompiler::Source::Source(Local<String> string,
+ CachedData* data)
+ : source_string(string), cached_data(data) {}
+
+
+ScriptCompiler::Source::~Source() {
+ delete cached_data;
+}
+
+
+const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
+ const {
+ return cached_data;
+}
+
+
Handle<Boolean> Boolean::New(Isolate* isolate, bool value) {
return value ? True(isolate) : False(isolate);
}
@@ -6171,6 +6372,22 @@ Array* Array::Cast(v8::Value* value) {
}
+Promise* Promise::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise*>(value);
+}
+
+
+Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise::Resolver*>(value);
+}
+
+
ArrayBuffer* ArrayBuffer::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 1d6a3bdba..a8ad0318d 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -238,7 +238,8 @@ int RunMain(int argc, char* argv[]) {
{
// Compile script in try/catch context.
v8::TryCatch try_catch;
- script = v8::Script::Compile(script_source, script_name);
+ v8::ScriptOrigin origin(script_name);
+ script = v8::Script::Compile(script_source, &origin);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 92a473231..f8d2c8459 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -304,7 +304,8 @@ bool ExecuteString(v8::Isolate* isolate,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(source, name);
+ v8::ScriptOrigin origin(name);
+ v8::Handle<v8::Script> script = v8::Script::Compile(source, &origin);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 47b0a8563..35cff1af7 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -119,9 +119,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
- JSTypedArray::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSTypedArray::kBufferOffset, object_offset);
+ JSTypedArray::kByteOffsetOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
CheckForName(name, isolate->heap()->byte_length_string(),
@@ -131,9 +129,7 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
CheckForName(name, isolate->heap()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->heap()->byte_offset_string(),
- JSDataView::kByteOffsetOffset, object_offset) ||
- CheckForName(name, isolate->heap()->buffer_string(),
- JSDataView::kBufferOffset, object_offset);
+ JSDataView::kByteOffsetOffset, object_offset);
default:
return false;
}
@@ -213,7 +209,9 @@ MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- return array_handle->SetElementsLength(*uint32_v);
+ Handle<Object> result = JSArray::SetElementsLength(array_handle, uint32_v);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -351,26 +349,6 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
-// Accessors::ScriptData
-//
-
-
-MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
- Object* object,
- void*) {
- Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->data();
-}
-
-
-const AccessorDescriptor Accessors::ScriptData = {
- ScriptGetData,
- IllegalSetter,
- 0
-};
-
-
-//
// Accessors::ScriptType
//
@@ -620,10 +598,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
}
Handle<Object> old_value;
- bool is_observed =
- FLAG_harmony_observation &&
- *function == *object &&
- function->map()->is_observed();
+ bool is_observed = *function == *object && function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
@@ -911,10 +886,10 @@ MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
- // Censor if the caller is not a classic mode function.
+ // Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (!caller->shared()->is_classic_mode()) {
+ if (caller->shared()->strict_mode() == STRICT) {
return isolate->heap()->null_value();
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index d157aeaad..83a847222 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -49,7 +49,6 @@ namespace internal {
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptColumnOffset) \
- V(ScriptData) \
V(ScriptType) \
V(ScriptCompilationType) \
V(ScriptLineEnds) \
@@ -128,7 +127,6 @@ class Accessors : public AllStatic {
static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*);
- static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
Object* object,
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index 5ec648460..a9103a84a 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -36,9 +36,9 @@ namespace v8 {
namespace internal {
AllocationTraceNode::AllocationTraceNode(
- AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ AllocationTraceTree* tree, unsigned function_info_index)
: tree_(tree),
- function_id_(shared_function_info_id),
+ function_info_index_(function_info_index),
total_size_(0),
allocation_count_(0),
id_(tree->next_node_id()) {
@@ -50,19 +50,21 @@ AllocationTraceNode::~AllocationTraceNode() {
}
-AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+AllocationTraceNode* AllocationTraceNode::FindChild(
+ unsigned function_info_index) {
for (int i = 0; i < children_.length(); i++) {
AllocationTraceNode* node = children_[i];
- if (node->function_id() == id) return node;
+ if (node->function_info_index() == function_info_index) return node;
}
return NULL;
}
-AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
- AllocationTraceNode* child = FindChild(id);
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(
+ unsigned function_info_index) {
+ AllocationTraceNode* child = FindChild(function_info_index);
if (child == NULL) {
- child = new AllocationTraceNode(tree_, id);
+ child = new AllocationTraceNode(tree_, function_info_index);
children_.Add(child);
}
return child;
@@ -78,17 +80,11 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
- const char* name = "<unknown function>";
- if (function_id_ != 0) {
- AllocationTracker::FunctionInfo* info =
- tracker->GetFunctionInfo(function_id_);
- if (info != NULL) {
- name = info->name;
- }
- }
- OS::Print("%s #%u", name, id_);
+ AllocationTracker::FunctionInfo* info =
+ tracker->function_info_list()[function_info_index_];
+ OS::Print("%s #%u", info->name, id_);
} else {
- OS::Print("%u #%u", function_id_, id_);
+ OS::Print("%u #%u", function_info_index_, id_);
}
OS::Print("\n");
indent += 2;
@@ -109,9 +105,9 @@ AllocationTraceTree::~AllocationTraceTree() {
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
- const Vector<SnapshotObjectId>& path) {
+ const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
- for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ for (unsigned* entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
node = node->FindOrAddChild(*entry);
@@ -126,6 +122,7 @@ void AllocationTraceTree::Print(AllocationTracker* tracker) {
root()->Print(0, tracker);
}
+
void AllocationTracker::DeleteUnresolvedLocation(
UnresolvedLocation** location) {
delete *location;
@@ -134,6 +131,7 @@ void AllocationTracker::DeleteUnresolvedLocation(
AllocationTracker::FunctionInfo::FunctionInfo()
: name(""),
+ function_id(0),
script_name(""),
script_id(0),
line(-1),
@@ -141,26 +139,103 @@ AllocationTracker::FunctionInfo::FunctionInfo()
}
+void AddressToTraceMap::AddRange(Address start, int size,
+ unsigned trace_node_id) {
+ Address end = start + size;
+ RemoveRange(start, end);
+
+ RangeStack new_range(start, trace_node_id);
+ ranges_.insert(RangeMap::value_type(end, new_range));
+}
+
+
+unsigned AddressToTraceMap::GetTraceNodeId(Address addr) {
+ RangeMap::const_iterator it = ranges_.upper_bound(addr);
+ if (it == ranges_.end()) return 0;
+ if (it->second.start <= addr) {
+ return it->second.trace_node_id;
+ }
+ return 0;
+}
+
+
+void AddressToTraceMap::MoveObject(Address from, Address to, int size) {
+ unsigned trace_node_id = GetTraceNodeId(from);
+ if (trace_node_id == 0) return;
+ RemoveRange(from, from + size);
+ AddRange(to, size, trace_node_id);
+}
+
+
+void AddressToTraceMap::Clear() {
+ ranges_.clear();
+}
+
+
+void AddressToTraceMap::Print() {
+ PrintF("[AddressToTraceMap (%" V8PRIuPTR "): \n", ranges_.size());
+ for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
+ PrintF("[%p - %p] => %u\n", it->second.start, it->first,
+ it->second.trace_node_id);
+ }
+ PrintF("]\n");
+}
+
+
+void AddressToTraceMap::RemoveRange(Address start, Address end) {
+ RangeMap::iterator it = ranges_.upper_bound(start);
+ if (it == ranges_.end()) return;
+
+ RangeStack prev_range(0, 0);
+
+ RangeMap::iterator to_remove_begin = it;
+ if (it->second.start < start) {
+ prev_range = it->second;
+ }
+ do {
+ if (it->first > end) {
+ if (it->second.start < end) {
+ it->second.start = end;
+ }
+ break;
+ }
+ ++it;
+ }
+ while (it != ranges_.end());
+
+ ranges_.erase(to_remove_begin, it);
+
+ if (prev_range.start != 0) {
+ ranges_.insert(RangeMap::value_type(start, prev_range));
+ }
+}
+
+
static bool AddressesMatch(void* key1, void* key2) {
return key1 == key2;
}
+void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
+ delete *info;
+}
+
+
AllocationTracker::AllocationTracker(
HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_(AddressesMatch) {
+ id_to_function_info_index_(AddressesMatch),
+ info_index_for_other_state_(0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(root)";
+ function_info_list_.Add(info);
}
AllocationTracker::~AllocationTracker() {
unresolved_locations_.Iterate(DeleteUnresolvedLocation);
- for (HashMap::Entry* p = id_to_function_info_.Start();
- p != NULL;
- p = id_to_function_info_.Next(p)) {
- delete reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
- }
+ function_info_list_.Iterate(&DeleteFunctionInfo);
}
@@ -193,13 +268,20 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
SharedFunctionInfo* shared = frame->function()->shared();
SnapshotObjectId id = ids_->FindOrAddEntry(
shared->address(), shared->Size(), false);
- allocation_trace_buffer_[length++] = id;
- AddFunctionInfo(shared, id);
+ allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
it.Advance();
}
+ if (length == 0) {
+ unsigned index = functionInfoIndexForVMState(isolate->current_vm_state());
+ if (index != 0) {
+ allocation_trace_buffer_[length++] = index;
+ }
+ }
AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
- Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ Vector<unsigned>(allocation_trace_buffer_, length));
top_node->AddAllocation(size);
+
+ address_to_trace_.AddRange(addr, size, top_node->id());
}
@@ -209,24 +291,14 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
}
-AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
- reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
- if (entry == NULL) {
- return NULL;
- }
- return reinterpret_cast<FunctionInfo*>(entry->value);
-}
-
-
-void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
- SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_.Lookup(
+unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_index_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());
+ info->function_id = id;
if (shared->script()->IsScript()) {
Script* script = Script::cast(shared->script());
if (script->name()->IsName()) {
@@ -241,8 +313,22 @@ void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
shared->start_position(),
info));
}
- entry->value = info;
+ entry->value = reinterpret_cast<void*>(function_info_list_.length());
+ function_info_list_.Add(info);
+ }
+ return static_cast<unsigned>(reinterpret_cast<intptr_t>((entry->value)));
+}
+
+
+unsigned AllocationTracker::functionInfoIndexForVMState(StateTag state) {
+ if (state != OTHER) return 0;
+ if (info_index_for_other_state_ == 0) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = "(V8 API)";
+ info_index_for_other_state_ = function_info_list_.length();
+ function_info_list_.Add(info);
}
+ return info_index_for_other_state_;
}
@@ -267,6 +353,7 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
void AllocationTracker::UnresolvedLocation::Resolve() {
if (script_.is_null()) return;
+ HandleScope scope(script_->GetIsolate());
info_->line = GetScriptLineNumber(script_, start_position_);
info_->column = GetScriptColumnNumber(script_, start_position_);
}
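
The new AddressToTraceMap keys every recorded allocation range by its end address, so a single std::map::upper_bound() lookup finds the range (if any) covering an arbitrary address. Below is a simplified, standalone sketch of that idea; the class, the names, and the omitted overlap handling (RemoveRange) are illustrative, not V8's.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

// Entries are keyed by the *end* address of a range; the value keeps the
// start address and the allocation-trace node id, mirroring the
// upper_bound() lookup in AddressToTraceMap::GetTraceNodeId() above.
struct Range {
  std::uintptr_t start;
  unsigned trace_node_id;
};

class RangeIndex {
 public:
  void Add(std::uintptr_t start, std::size_t size, unsigned node_id) {
    // The real code removes overlapping ranges first; this sketch assumes
    // non-overlapping insertions.
    Range range = { start, node_id };
    ranges_.insert(std::make_pair(start + size, range));
  }

  // Returns 0 when the address is not covered by any recorded range.
  unsigned Lookup(std::uintptr_t addr) const {
    std::map<std::uintptr_t, Range>::const_iterator it =
        ranges_.upper_bound(addr);  // first range whose end is > addr
    if (it == ranges_.end()) return 0;
    return it->second.start <= addr ? it->second.trace_node_id : 0;
  }

 private:
  std::map<std::uintptr_t, Range> ranges_;  // end address -> {start, node id}
};

int main() {
  RangeIndex index;
  index.Add(0x1000, 0x40, 7);                 // object traced to node 7
  std::cout << index.Lookup(0x1020) << "\n";  // 7: inside [0x1000, 0x1040)
  std::cout << index.Lookup(0x2000) << "\n";  // 0: untracked address
  return 0;
}
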
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
index 1a5dc9e12..b876d7d14 100644
--- a/deps/v8/src/allocation-tracker.h
+++ b/deps/v8/src/allocation-tracker.h
@@ -28,6 +28,8 @@
#ifndef V8_ALLOCATION_TRACKER_H_
#define V8_ALLOCATION_TRACKER_H_
+#include <map>
+
namespace v8 {
namespace internal {
@@ -38,13 +40,13 @@ class AllocationTraceTree;
class AllocationTraceNode {
public:
AllocationTraceNode(AllocationTraceTree* tree,
- SnapshotObjectId shared_function_info_id);
+ unsigned function_info_index);
~AllocationTraceNode();
- AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
- AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindChild(unsigned function_info_index);
+ AllocationTraceNode* FindOrAddChild(unsigned function_info_index);
void AddAllocation(unsigned size);
- SnapshotObjectId function_id() const { return function_id_; }
+ unsigned function_info_index() const { return function_info_index_; }
unsigned allocation_size() const { return total_size_; }
unsigned allocation_count() const { return allocation_count_; }
unsigned id() const { return id_; }
@@ -54,7 +56,7 @@ class AllocationTraceNode {
private:
AllocationTraceTree* tree_;
- SnapshotObjectId function_id_;
+ unsigned function_info_index_;
unsigned total_size_;
unsigned allocation_count_;
unsigned id_;
@@ -68,7 +70,7 @@ class AllocationTraceTree {
public:
AllocationTraceTree();
~AllocationTraceTree();
- AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* AddPathFromEnd(const Vector<unsigned>& path);
AllocationTraceNode* root() { return &root_; }
unsigned next_node_id() { return next_node_id_++; }
void Print(AllocationTracker* tracker);
@@ -81,11 +83,36 @@ class AllocationTraceTree {
};
+class AddressToTraceMap {
+ public:
+ void AddRange(Address addr, int size, unsigned node_id);
+ unsigned GetTraceNodeId(Address addr);
+ void MoveObject(Address from, Address to, int size);
+ void Clear();
+ size_t size() { return ranges_.size(); }
+ void Print();
+
+ private:
+ struct RangeStack {
+ RangeStack(Address start, unsigned node_id)
+ : start(start), trace_node_id(node_id) {}
+ Address start;
+ unsigned trace_node_id;
+ };
+ // [start, end) -> trace
+ typedef std::map<Address, RangeStack> RangeMap;
+
+ void RemoveRange(Address start, Address end);
+
+ RangeMap ranges_;
+};
+
class AllocationTracker {
public:
struct FunctionInfo {
FunctionInfo();
const char* name;
+ SnapshotObjectId function_id;
const char* script_name;
int script_id;
int line;
@@ -99,11 +126,15 @@ class AllocationTracker {
void AllocationEvent(Address addr, int size);
AllocationTraceTree* trace_tree() { return &trace_tree_; }
- HashMap* id_to_function_info() { return &id_to_function_info_; }
- FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+ const List<FunctionInfo*>& function_info_list() const {
+ return function_info_list_;
+ }
+ AddressToTraceMap* address_to_trace() { return &address_to_trace_; }
private:
- void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ unsigned AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+ static void DeleteFunctionInfo(FunctionInfo** info);
+ unsigned functionInfoIndexForVMState(StateTag state);
class UnresolvedLocation {
public:
@@ -125,9 +156,12 @@ class AllocationTracker {
HeapObjectsMap* ids_;
StringsStorage* names_;
AllocationTraceTree trace_tree_;
- SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
- HashMap id_to_function_info_;
+ unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
+ List<FunctionInfo*> function_info_list_;
+ HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
+ unsigned info_index_for_other_state_;
+ AddressToTraceMap address_to_trace_;
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
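
The header changes above replace per-node SnapshotObjectIds with small indices into one shared function_info_list_, with index 0 reserved for a synthetic "(root)" entry. A minimal standalone sketch of that interning pattern follows; std::vector and std::map stand in for V8's internal List and HashMap, and every name here is illustrative.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct FunctionInfo {
  std::string name;
  std::uint64_t function_id;
};

class FunctionInfoTable {
 public:
  FunctionInfoTable() {
    // Index 0 is reserved, like the "(root)" entry in AllocationTracker.
    FunctionInfo root = { "(root)", 0 };
    infos_.push_back(root);
  }

  // Returns a stable small index for a function id, adding it on first use.
  unsigned Intern(std::uint64_t id, const std::string& name) {
    std::map<std::uint64_t, unsigned>::iterator it = index_by_id_.find(id);
    if (it != index_by_id_.end()) return it->second;
    unsigned index = static_cast<unsigned>(infos_.size());
    FunctionInfo info = { name, id };
    infos_.push_back(info);
    index_by_id_[id] = index;
    return index;
  }

  const FunctionInfo& at(unsigned index) const { return infos_[index]; }

 private:
  std::vector<FunctionInfo> infos_;
  std::map<std::uint64_t, unsigned> index_by_id_;
};

int main() {
  FunctionInfoTable table;
  unsigned index = table.Intern(0x1234, "makeClosure");
  // Re-interning the same id yields the same index, so trace nodes can
  // store the small index instead of the full id.
  bool ok = (index == table.Intern(0x1234, "makeClosure")) &&
            (table.at(index).name == "makeClosure");
  return ok ? 0 : 1;
}
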
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 54a3e9145..5dcf59229 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -95,11 +95,6 @@ namespace v8 {
(isolate)->handle_scope_implementer(); \
handle_scope_implementer->DecrementCallDepth(); \
if (has_pending_exception) { \
- if (handle_scope_implementer->CallDepthIsZero() && \
- (isolate)->is_out_of_memory()) { \
- if (!(isolate)->ignore_out_of_memory()) \
- i::V8::FatalProcessOutOfMemory(NULL); \
- } \
bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
(isolate)->OptionalRescheduleException(call_depth_is_zero); \
do_callback \
@@ -560,8 +555,8 @@ void V8::MakeWeak(i::Object** object,
}
-void V8::ClearWeak(i::Object** obj) {
- i::GlobalHandles::ClearWeakness(obj);
+void* V8::ClearWeak(i::Object** obj) {
+ return i::GlobalHandles::ClearWeakness(obj);
}
@@ -1611,111 +1606,86 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
-// --- S c r i p t ---
+// --- S c r i p t s ---
-Local<Script> Script::New(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
- LOG_API(isolate, "Script::New");
- ENTER_V8(isolate);
- i::SharedFunctionInfo* raw_result = NULL;
- { i::HandleScope scope(isolate);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- bool is_shared_cross_origin = false;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
- }
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset =
- static_cast<int>(origin->ResourceColumnOffset()->Value());
- }
- if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- is_shared_cross_origin =
- origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
- }
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl =
- static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
- i::Compiler::CompileScript(str,
- name_obj,
- line_offset,
- column_offset,
- is_shared_cross_origin,
- isolate->global_context(),
- NULL,
- pre_data_impl,
- Utils::OpenHandle(*script_data, true),
- i::NOT_NATIVES_CODE);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
- raw_result = *result;
+// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
+// JSFunction.
+
+ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
+ BufferPolicy buffer_policy_)
+ : data(data_), length(length_), buffer_policy(buffer_policy_) {}
+
+
+ScriptCompiler::CachedData::~CachedData() {
+ if (buffer_policy == BufferOwned) {
+ delete[] data;
}
- i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return ToApiHandle<Script>(result);
}
-Local<Script> Script::New(v8::Handle<String> source,
- v8::Handle<Value> file_name) {
- ScriptOrigin origin(file_name);
- return New(source, &origin);
+Local<Script> UnboundScript::BindToCurrentContext() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::SharedFunctionInfo>
+ function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
+ i::Handle<i::JSFunction> function =
+ obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
+ function_info, obj->GetIsolate()->global_context());
+ return ToApiHandle<Script>(function);
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin,
- v8::ScriptData* pre_data,
- v8::Handle<String> script_data) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Isolate* isolate = str->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
- LOG_API(isolate, "Script::Compile");
- ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data);
- if (generic.IsEmpty())
- return generic;
- i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
- i::Handle<i::SharedFunctionInfo> function =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function,
- isolate->global_context());
- return ToApiHandle<Script>(result);
+int UnboundScript::GetId() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetId()", return -1);
+ LOG_API(isolate, "v8::UnboundScript::GetId");
+ {
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info(
+ i::SharedFunctionInfo::cast(*obj));
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
+ }
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::Handle<Value> file_name,
- v8::Handle<String> script_data) {
- ScriptOrigin origin(file_name);
- return Compile(source, &origin, 0, script_data);
+int UnboundScript::GetLineNumber(int code_pos) {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetLineNumber()", return -1);
+ LOG_API(isolate, "UnboundScript::GetLineNumber");
+ if (obj->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(*obj));
+ return i::GetScriptLineNumber(script, code_pos);
+ } else {
+ return -1;
+ }
+}
+
+
+Handle<Value> UnboundScript::GetScriptName() {
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
+ ON_BAILOUT(isolate, "v8::UnboundScript::GetName()",
+ return Handle<String>());
+ LOG_API(isolate, "UnboundScript::GetName");
+ if (obj->IsScript()) {
+ i::Object* name = i::Script::cast(*obj)->name();
+ return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
+ } else {
+ return Handle<String>();
+ }
}
Local<Value> Script::Run() {
- // If execution is terminating, Compile(script)->Run() requires this check.
+ // If execution is terminating, Compile(..)->Run() requires this
+ // check.
if (this == NULL) return Local<Value>();
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
@@ -1728,15 +1698,8 @@ Local<Value> Script::Run() {
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun;
- if (obj->IsSharedFunctionInfo()) {
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), isolate);
- fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, isolate->global_context());
- } else {
- fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
- }
+ i::Handle<i::JSFunction> fun =
+ i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
@@ -1750,78 +1713,149 @@ Local<Value> Script::Run() {
}
-static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
- i::Handle<i::Object> obj = Utils::OpenHandle(script);
- i::Handle<i::SharedFunctionInfo> result;
- if (obj->IsSharedFunctionInfo()) {
- result =
- i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
- } else {
- result =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
+Local<UnboundScript> Script::GetUnboundScript() {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return ToApiHandle<UnboundScript>(
+ i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()));
+}
+
+
+Local<UnboundScript> ScriptCompiler::CompileUnbound(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::ScriptDataImpl* script_data_impl = NULL;
+ i::CachedDataMode cached_data_mode = i::NO_CACHED_DATA;
+ if (options & kProduceDataToCache) {
+ cached_data_mode = i::PRODUCE_CACHED_DATA;
+ ASSERT(source->cached_data == NULL);
+ if (source->cached_data) {
+ // Asked to produce cached data even though there is some already -> not
+ // good. In release mode, try to do the right thing: Just regenerate the
+ // data.
+ delete source->cached_data;
+ source->cached_data = NULL;
+ }
+ } else if (source->cached_data) {
+    // FIXME(marja): Make the compiler use CachedData directly. Alignment
+    // needs to be taken care of.
+ script_data_impl = static_cast<i::ScriptDataImpl*>(ScriptData::New(
+ reinterpret_cast<const char*>(source->cached_data->data),
+ source->cached_data->length));
+    // We assert that the pre-data is sane; in release mode we can actually
+    // handle it if it turns out not to be.
+ ASSERT(script_data_impl->SanityCheck());
+ if (script_data_impl->SanityCheck()) {
+ cached_data_mode = i::CONSUME_CACHED_DATA;
+ } else {
+ // If the pre-data isn't sane we simply ignore it.
+ delete script_data_impl;
+ script_data_impl = NULL;
+ delete source->cached_data;
+ source->cached_data = NULL;
+ }
}
- return result;
-}
-
-int Script::GetId() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
- LOG_API(isolate, "Script::Id");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- return script->id()->value();
+ i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
+ return Local<UnboundScript>());
+ LOG_API(isolate, "ScriptCompiler::CompileUnbound");
+ ENTER_V8(isolate);
+ i::SharedFunctionInfo* raw_result = NULL;
+ { i::HandleScope scope(isolate);
+ i::Handle<i::Object> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ bool is_shared_cross_origin = false;
+ if (!source->resource_name.IsEmpty()) {
+ name_obj = Utils::OpenHandle(*(source->resource_name));
+ }
+ if (!source->resource_line_offset.IsEmpty()) {
+ line_offset = static_cast<int>(source->resource_line_offset->Value());
+ }
+ if (!source->resource_column_offset.IsEmpty()) {
+ column_offset =
+ static_cast<int>(source->resource_column_offset->Value());
+ }
+ if (!source->resource_is_shared_cross_origin.IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ is_shared_cross_origin =
+ source->resource_is_shared_cross_origin == v8::True(v8_isolate);
+ }
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::SharedFunctionInfo> result =
+ i::Compiler::CompileScript(str,
+ name_obj,
+ line_offset,
+ column_offset,
+ is_shared_cross_origin,
+ isolate->global_context(),
+ NULL,
+ &script_data_impl,
+ cached_data_mode,
+ i::NOT_NATIVES_CODE);
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ raw_result = *result;
+ if ((options & kProduceDataToCache) && script_data_impl != NULL) {
+      // script_data_impl now contains the data that was generated. source
+      // will take ownership of it.
+ source->cached_data = new CachedData(
+ reinterpret_cast<const uint8_t*>(script_data_impl->Data()),
+ script_data_impl->Length(), CachedData::BufferOwned);
+ script_data_impl->owns_store_ = false;
+ }
+ delete script_data_impl;
}
+ i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
+ return ToApiHandle<UnboundScript>(result);
}
-int Script::GetLineNumber(int code_pos) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
- LOG_API(isolate, "Script::GetLineNumber");
- if (obj->IsScript()) {
- i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
- return i::GetScriptLineNumber(script, code_pos);
- } else {
- return -1;
- }
+Local<Script> ScriptCompiler::Compile(
+ Isolate* v8_isolate,
+ Source* source,
+ CompileOptions options) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()",
+ return Local<Script>());
+  LOG_API(isolate, "ScriptCompiler::Compile()");
+ ENTER_V8(isolate);
+ Local<UnboundScript> generic =
+ CompileUnbound(v8_isolate, source, options);
+ if (generic.IsEmpty()) return Local<Script>();
+ return generic->BindToCurrentContext();
}
-Handle<Value> Script::GetScriptName() {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
- LOG_API(isolate, "Script::GetName");
- if (obj->IsScript()) {
- i::Object* name = i::Script::cast(*obj)->name();
- return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
- } else {
- return Handle<String>();
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin,
+ ScriptData* script_data) {
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ ScriptCompiler::CachedData* cached_data = NULL;
+ if (script_data) {
+ cached_data = new ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(script_data->Data()),
+ script_data->Length());
+ }
+ if (origin) {
+ ScriptCompiler::Source script_source(source, *origin, cached_data);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
+ ScriptCompiler::Source script_source(source, cached_data);
+ return ScriptCompiler::Compile(
+ reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
+ &script_source);
}
-void Script::SetData(v8::Handle<String> data) {
- i::Handle<i::HeapObject> obj =
- i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::SetData()", return);
- LOG_API(isolate, "Script::SetData");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- script->set_data(*raw_data);
- }
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::Handle<String> file_name) {
+ ScriptOrigin origin(file_name);
+ return Compile(source, &origin);
}
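
For embedders, the Script/UnboundScript rework above turns compilation into a two-step path: compile once to a context-independent UnboundScript, then bind it to whichever context is currently entered. The sketch below is a hedged usage example against this version's public API; the helper name, the explicit NULL cached data, and the reliance on the default compile options are assumptions, and HandleScope/error handling are omitted.

#include <v8.h>

v8::Local<v8::Value> CompileAndRun(v8::Isolate* isolate,
                                   v8::Handle<v8::String> code,
                                   v8::Handle<v8::String> file_name) {
  v8::ScriptOrigin origin(file_name);
  v8::ScriptCompiler::Source source(code, origin, NULL);  // no cached data
  // Compile once, independently of any particular context.
  v8::Local<v8::UnboundScript> unbound =
      v8::ScriptCompiler::CompileUnbound(isolate, &source);
  if (unbound.IsEmpty()) return v8::Local<v8::Value>();
  // Bind the result to the currently entered context and run it there.
  v8::Local<v8::Script> script = unbound->BindToCurrentContext();
  return script->Run();
}
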
@@ -1980,21 +2014,6 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
}
-v8::Handle<Value> Message::GetScriptData() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- // Return this.script.data.
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Escape(Utils::ToLocal(data));
-}
-
-
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
@@ -2153,9 +2172,10 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Escape(Utils::StackFrameToLocal(obj));
+ i::Handle<i::Object> obj =
+ i::Object::GetElementNoExceptionThrown(isolate, self, index);
+ i::Handle<i::JSObject> jsobj = i::Handle<i::JSObject>::cast(obj);
+ return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
@@ -2686,6 +2706,20 @@ void v8::Array::CheckCast(Value* that) {
}
+void v8::Promise::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Cast()",
+ "Could not convert to promise");
+}
+
+
+void v8::Promise::Resolver::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsPromise(),
+ "v8::Promise::Resolver::Cast()",
+ "Could not convert to promise resolver");
+}
+
+
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSArrayBuffer(),
@@ -3023,7 +3057,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs),
- i::kNonStrictMode);
+ i::SLOPPY);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3043,7 +3077,7 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
index,
value_obj,
NONE,
- i::kNonStrictMode);
+ i::SLOPPY);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
@@ -3148,7 +3182,8 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_name);
+ PropertyAttributes result =
+ i::JSReceiver::GetPropertyAttribute(self, key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -3422,6 +3457,27 @@ bool Object::SetDeclaredAccessor(Local<String> name,
}
+void Object::SetAccessorProperty(Local<String> name,
+ Local<Function> getter,
+ Handle<Function> setter,
+ PropertyAttribute attribute,
+ AccessControl settings) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
+ i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
+ if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
+ i::JSObject::DefineAccessor(v8::Utils::OpenHandle(this),
+ v8::Utils::OpenHandle(*name),
+ getter_i,
+ setter_i,
+ static_cast<PropertyAttributes>(attribute),
+ settings);
+}
+
+
bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
@@ -3675,7 +3731,7 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
isolate->factory()->NewExternalArray(length, array_type, data);
i::Handle<i::Map> external_array_map =
- isolate->factory()->GetElementsTransitionMap(
+ i::JSObject::GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
@@ -4056,7 +4112,9 @@ bool Function::IsBuiltin() const {
int Function::ScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript()) return v8::Script::kNoScriptId;
+ if (!func->shared()->script()->IsScript()) {
+ return v8::UnboundScript::kNoScriptId;
+ }
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return script->id()->value();
}
@@ -5054,8 +5112,8 @@ int v8::V8::ContextDisposedNotification() {
}
-bool v8::V8::InitializeICU() {
- return i::InitializeICU();
+bool v8::V8::InitializeICU(const char* icu_data_file) {
+ return i::InitializeICU(icu_data_file);
}
@@ -5172,12 +5230,6 @@ Handle<Value> v8::Context::GetSecurityToken() {
}
-bool Context::HasOutOfMemoryException() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- return env->has_out_of_memory();
-}
-
-
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
@@ -5351,6 +5403,8 @@ inline Local<String> NewString(Isolate* v8_isolate,
if (length == -1) length = StringLength(data);
i::Handle<i::String> result = NewString(
isolate->factory(), type, i::Vector<const Char>(data, length));
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
if (type == String::kUndetectableString) {
result->MarkAsUndetectable();
}
@@ -5408,6 +5462,8 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
i::Handle<i::String> result = isolate->factory()->NewConsString(left_string,
right_string);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
return Utils::ToLocal(result);
}
@@ -5415,14 +5471,22 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
static i::Handle<i::String> NewExternalStringHandle(
i::Isolate* isolate,
v8::String::ExternalStringResource* resource) {
- return isolate->factory()->NewExternalStringFromTwoByte(resource);
+ i::Handle<i::String> result =
+ isolate->factory()->NewExternalStringFromTwoByte(resource);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
+ return result;
}
static i::Handle<i::String> NewExternalAsciiStringHandle(
i::Isolate* isolate,
v8::String::ExternalAsciiStringResource* resource) {
- return isolate->factory()->NewExternalStringFromAscii(resource);
+ i::Handle<i::String> result =
+ isolate->factory()->NewExternalStringFromAscii(resource);
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!result.is_null());
+ return result;
}
@@ -5653,30 +5717,18 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i_isolate->date_cache()->ResetDateCache();
- i::HandleScope scope(i_isolate);
- // Get the function ResetDateCache (defined in date.js).
- i::Handle<i::String> func_name_str =
- i_isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("ResetDateCache"));
- i::MaybeObject* result =
- i_isolate->js_builtins_object()->GetProperty(*func_name_str);
- i::Object* object_func;
- if (!result->ToObject(&object_func)) {
+ if (!i_isolate->eternal_handles()->Exists(
+ i::EternalHandles::DATE_CACHE_VERSION)) {
return;
}
-
- if (object_func->IsJSFunction()) {
- i::Handle<i::JSFunction> func =
- i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
-
- // Call ResetDateCache(0 but expect no exceptions:
- bool caught_exception = false;
- i::Execution::TryCall(func,
- i_isolate->js_builtins_object(),
- 0,
- NULL,
- &caught_exception);
- }
+ i::Handle<i::FixedArray> date_cache_version =
+ i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
+ i::EternalHandles::DATE_CACHE_VERSION));
+ ASSERT_EQ(1, date_cache_version->length());
+ CHECK(date_cache_version->get(0)->IsSmi());
+ date_cache_version->set(
+ 0,
+ i::Smi::FromInt(i::Smi::cast(date_cache_version->get(0))->value() + 1));
}
@@ -5778,6 +5830,130 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
+bool Value::IsPromise() const {
+ i::Handle<i::Object> val = Utils::OpenHandle(this);
+ if (!val->IsJSObject()) return false;
+ i::Handle<i::JSObject> obj = i::Handle<i::JSObject>::cast(val);
+ i::Isolate* isolate = obj->GetIsolate();
+ LOG_API(isolate, "IsPromise");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { obj };
+ i::Handle<i::Object> b = i::Execution::Call(
+ isolate,
+ handle(
+ isolate->context()->global_object()->native_context()->is_promise()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return b->BooleanValue();
+}
+
+
+Local<Promise::Resolver> Promise::Resolver::New(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Promise::Resolver::New");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_create()),
+ isolate->factory()->undefined_value(),
+ 0, NULL,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise::Resolver>());
+ return Local<Promise::Resolver>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Resolver::GetPromise() {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ return Local<Promise>::Cast(Utils::ToLocal(promise));
+}
+
+
+void Promise::Resolver::Resolve(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Resolve");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_resolve()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+void Promise::Resolver::Reject(Handle<Value> value) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Resolver::Reject");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+ i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_reject()),
+ isolate->factory()->undefined_value(),
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+}
+
+
+Local<Promise> Promise::Chain(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Chain");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_chain()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
+Local<Promise> Promise::Catch(Handle<Function> handler) {
+ i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, "Promise::Catch");
+ ENTER_V8(isolate);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate,
+ handle(isolate->context()->global_object()->native_context()->
+ promise_catch()),
+ promise,
+ ARRAY_SIZE(argv), argv,
+ &has_pending_exception,
+ false);
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
+ return Local<Promise>::Cast(Utils::ToLocal(result));
+}
+
+
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -5842,8 +6018,15 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- ASSERT(obj->buffer()->IsJSArrayBuffer());
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ i::Handle<i::JSArrayBuffer> buffer;
+ if (obj->IsJSDataView()) {
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
+ ASSERT(data_view->buffer()->IsJSArrayBuffer());
+ buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
+ } else {
+ ASSERT(obj->IsJSTypedArray());
+ buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
+ }
return Utils::ToLocal(buffer);
}
@@ -5914,7 +6097,9 @@ i::Handle<i::JSTypedArray> NewTypedArray(
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- obj->set_elements(*elements);
+ i::Handle<i::Map> map =
+ i::JSObject::GetElementsTransitionMap(obj, elements_kind);
+ obj->set_map_and_elements(*map, *elements);
return obj;
}
@@ -5954,40 +6139,84 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- result->set_name(*name);
- }
+ if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
return Utils::ToLocal(result);
}
-Local<Private> v8::Private::New(
- Isolate* isolate, const char* data, int length) {
+Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT);
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
+
+
+Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->for_api_string();
+ i::Handle<i::JSObject> symbols =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(symbols, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(symbols, i_name, symbol, NONE, i::STRICT);
+ }
+ return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+}
+
+
+Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
- if (data != NULL) {
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- symbol->set_name(*name);
- }
+ if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
Local<Symbol> result = Utils::ToLocal(symbol);
return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
+Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
+ i::Handle<i::String> part = i_isolate->factory()->private_api_string();
+ i::Handle<i::JSObject> privates =
+ i::Handle<i::JSObject>::cast(i::JSObject::GetProperty(registry, part));
+ i::Handle<i::Object> symbol = i::JSObject::GetProperty(privates, i_name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = i_isolate->factory()->NewPrivateSymbol();
+ i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
+ i::JSObject::SetProperty(privates, i_name, symbol, NONE, i::STRICT);
+ }
+ Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+ return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
@@ -6027,11 +6256,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
-void V8::IgnoreOutOfMemoryException() {
- EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
-}
-
-
bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
@@ -6280,6 +6504,25 @@ void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
}
+void V8::RunMicrotasks(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::HandleScope scope(i_isolate);
+ i::V8::RunMicrotasks(i_isolate);
+}
+
+
+void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask));
+}
+
+
+void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) {
+ reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun);
+}
+
+
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
i::V8::RemoveCallCompletedCallback(callback);
}
@@ -6369,6 +6612,47 @@ void Isolate::Exit() {
}
+Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
+ Isolate* isolate,
+ Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
+ : on_failure_(on_failure) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ internal_ = reinterpret_cast<void*>(
+ new i::DisallowJavascriptExecution(i_isolate));
+ } else {
+ ASSERT_EQ(THROW_ON_FAILURE, on_failure);
+ internal_ = reinterpret_cast<void*>(
+ new i::ThrowOnJavascriptExecution(i_isolate));
+ }
+}
+
+
+Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
+ if (on_failure_ == CRASH_ON_FAILURE) {
+ delete reinterpret_cast<i::DisallowJavascriptExecution*>(internal_);
+ } else {
+ delete reinterpret_cast<i::ThrowOnJavascriptExecution*>(internal_);
+ }
+}
+
+
+Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
+ Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_assert_ = reinterpret_cast<void*>(
+ new i::AllowJavascriptExecution(i_isolate));
+ internal_throws_ = reinterpret_cast<void*>(
+ new i::NoThrowOnJavascriptExecution(i_isolate));
+}
+
+
+Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
+ delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
+ delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
+}
+
+
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!isolate->IsInitialized()) {
@@ -6389,6 +6673,11 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
}
+void Isolate::SetEventLogger(LogEventCallback that) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_event_logger(that);
+}
+
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -6727,9 +7016,12 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(entry->name()));
} else {
- return ToApiHandle<String>(isolate->factory()->NewConsString(
+ i::Handle<i::String> cons = isolate->factory()->NewConsString(
isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
- isolate->factory()->InternalizeUtf8String(entry->name())));
+ isolate->factory()->InternalizeUtf8String(entry->name()));
+ // We do not expect this to fail. Change this if it does.
+ CHECK(!cons.is_null());
+ return ToApiHandle<String>(cons);
}
}
@@ -6845,19 +7137,29 @@ void CpuProfiler::SetSamplingInterval(int us) {
}
-void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), record_samples);
}
-const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
- return reinterpret_cast<const CpuProfile*>(
+void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
+ StartProfiling(title, record_samples);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+ return reinterpret_cast<CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
*Utils::OpenHandle(*title)));
}
+const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
+ return StopProfiling(title);
+}
+
+
void CpuProfiler::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
i::StateTag state = isolate->current_vm_state();
@@ -6939,6 +7241,13 @@ SnapshotObjectId HeapGraphNode::GetId() const {
int HeapGraphNode::GetSelfSize() const {
+ size_t size = ToInternal(this)->self_size();
+ CHECK(size <= static_cast<size_t>(internal::kMaxInt));
+ return static_cast<int>(size);
+}
+
+
+size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
@@ -7015,9 +7324,6 @@ void HeapSnapshot::Serialize(OutputStream* stream,
Utils::ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
- Utils::ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
- "v8::HeapSnapshot::Serialize",
- "Unsupported output encoding");
Utils::ApiCheck(stream->GetChunkSize() > 0,
"v8::HeapSnapshot::Serialize",
"Invalid stream chunk size");
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 9fc99d9d2..128087c89 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -183,7 +183,8 @@ class RegisteredExtension {
V(DataView, JSDataView) \
V(String, String) \
V(Symbol, Symbol) \
- V(Script, Object) \
+ V(Script, JSFunction) \
+ V(UnboundScript, SharedFunctionInfo) \
V(Function, JSFunction) \
V(Message, JSObject) \
V(Context, Context) \
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
new file mode 100644
index 000000000..906a5ce64
--- /dev/null
+++ b/deps/v8/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3399958ee..d966380c1 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -101,7 +101,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -109,7 +109,28 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+ // We return the PC for ool constant pool since this function is used by the
+    // serializer and expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ if (FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_constant_pool_address_at(pc_,
+ host_->constant_pool());
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
@@ -120,7 +141,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -131,21 +152,22 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -157,7 +179,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -268,7 +290,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -402,7 +424,18 @@ Address Assembler::target_pointer_address_at(Address pc) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool != NULL);
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return reinterpret_cast<Address>(constant_pool) +
+ GetLdrRegisterImmediateOffset(instr);
+}
+
+
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -410,9 +443,14 @@ Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool));
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(target_pointer_address_at(pc));
}
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -430,7 +468,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+  if (IsLdrPcImmediateOffset(candidate_instr) ||
+ IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@@ -441,7 +480,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+  if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) ||
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@@ -452,8 +492,12 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
@@ -463,7 +507,9 @@ static Instr EncodeMovwImmediate(uint32_t immediate) {
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -479,6 +525,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
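
On ARM, target_address_at() and set_target_address_at() now distinguish three ways a target address can be encoded: a movw/movt immediate pair, an ldr off pp into the out-of-line ConstantPoolArray, and the old pc-relative ldr into the inline constant pool. Below is a compact standalone model of that dispatch; the enum stands in for the real instruction-bit checks (IsMovW, IsLdrPpImmediateOffset, IsLdrPcImmediateOffset), and every name is illustrative.

#include <cstdint>

enum TargetEncoding {
  kMovwMovtPair,        // address built from two 16-bit immediates
  kOutOfLineConstPool,  // ldr rd, [pp, #offset] -> slot in ConstantPoolArray
  kInlineConstPool      // ldr rd, [pc, #offset] -> slot after the code
};

// Reads the target address for the given encoding. Both constant pool
// variants load it from a pool slot whose address (pp + offset or
// pc + offset) the caller has already computed.
std::uintptr_t ReadTarget(TargetEncoding encoding,
                          std::uint16_t movw_lo, std::uint16_t movt_hi,
                          const std::uintptr_t* pool_slot) {
  switch (encoding) {
    case kMovwMovtPair:
      return (static_cast<std::uintptr_t>(movt_hi) << 16) | movw_lo;
    case kOutOfLineConstPool:
    case kInlineConstPool:
      return *pool_slot;
  }
  return 0;
}

int main() {
  std::uintptr_t slot = 0xCAFE;
  return ReadTarget(kOutOfLineConstPool, 0, 0, &slot) == 0xCAFE ? 0 : 1;
}
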
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 35279e557..297cdcc03 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -293,10 +293,20 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
+ // The deserializer needs to know whether a pointer is specially coded.  Being
+ // specially coded on ARM means that it is a movw/movt instruction, or is an
+ // out-of-line constant pool entry.  These only occur if
+ // FLAG_enable_ool_constant_pool is true.
+ return FLAG_enable_ool_constant_pool;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ if (FLAG_enable_ool_constant_pool) {
+ return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
+ } else {
+ return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ }
}
@@ -344,12 +354,17 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
+
+ if ((shift_op == ROR) && (shift_imm == 0)) {
+ // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
+ // RRX as ROR #0 (See below).
+ shift_op = LSL;
+ } else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
ASSERT(shift_imm == 0);
shift_op_ = ROR;
@@ -475,9 +490,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -515,6 +536,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@@ -525,6 +547,8 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
+ constant_pool_available_ = !FLAG_enable_ool_constant_pool;
+ constant_pool_full_ = false;
ClearRecordedAstId();
}
@@ -535,11 +559,12 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_32_bit_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -722,6 +747,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpMask) == kLdrPpPattern;
+}
+
+
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@@ -729,6 +761,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
+
+
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
@@ -1054,14 +1093,24 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
}
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+static bool use_mov_immediate_load(const Operand& x,
+ const Assembler* assembler) {
+ if (assembler != NULL && !assembler->can_use_constant_pool()) {
+ // If there is no constant pool available, we must use a mov immediate.
+ // TODO(rmcilroy): enable ARMv6 support.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
return true;
- }
- if (x.must_output_reloc_info(assembler)) {
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size())) {
+ // Prefer movw / movt to constant pool if it is more efficient on the CPU.
+ return true;
+ } else if (x.must_output_reloc_info(assembler)) {
+ // Prefer constant pool if data is likely to be patched.
return false;
+ } else {
+ // Otherwise, use immediate load if movw / movt is available.
+ return CpuFeatures::IsSupported(ARMv7);
}
- return CpuFeatures::IsSupported(ARMv7);
}
@@ -1075,7 +1124,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
+ return !use_mov_immediate_load(*this, assembler);
} else {
// If this is not a mov or mvn instruction there will always an additional
// instructions - either mov or ldr. The mov might actually be two
@@ -1091,26 +1140,33 @@ bool Operand::is_single_instruction(const Assembler* assembler,
}
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
+void Assembler::move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond) {
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
}
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // TODO(rmcilroy): add ARMv6 support for immediate loads.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | target.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ ASSERT(can_use_constant_pool());
+ ConstantPoolAddEntry(rinfo);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
@@ -1133,20 +1189,9 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
+ move_32_bit_immediate(rd, x, cond);
} else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
+ mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1748,7 +1793,9 @@ void Assembler::uxtb(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1770,7 +1817,9 @@ void Assembler::uxtab(Register dst,
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
- ASSERT(src2.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src2.shift_op() == ROR) ||
+ ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
@@ -1790,7 +1839,9 @@ void Assembler::uxtb16(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1814,8 +1865,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
+ move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
@@ -2422,7 +2472,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
+ } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2438,8 +2488,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolAddEntry(rinfo);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -3169,6 +3220,7 @@ void Assembler::GrowBuffer() {
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -3204,28 +3256,16 @@ void Assembler::emit_code_stub_address(Code* stub) {
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3236,9 +3276,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@@ -3250,34 +3290,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool_builder_.AddEntry(this, rinfo);
} else {
- ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
}
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -3299,6 +3343,13 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
// Some short sequence of instruction mustn't be broken up by constant pool
// emission, such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@@ -3496,6 +3547,195 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return constant_pool_builder_.Allocate(heap);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(),
+ merged_indexes_(),
+ count_of_64bit_(0),
+ count_of_code_ptr_(0),
+ count_of_heap_ptr_(0),
+ count_of_32bit_(0) { }
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
+ return rmode == RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsCodeTarget(rmode);
+}
+
+
+bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+}
+
+
+void ConstantPoolBuilder::AddEntry(Assembler* assm,
+ const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ ASSERT(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ if (RelocInfo::IsNone(rmode) ||
+ (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<RelocInfo>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, *it)) {
+ merged_index = i;
+ break;
+ }
+ }
+ }
+
+ entries_.push_back(rinfo);
+ merged_indexes_.push_back(merged_index);
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ if (Is64BitEntry(rmode)) {
+ count_of_64bit_++;
+ } else if (Is32BitEntry(rmode)) {
+ count_of_32bit_++;
+ } else if (IsCodePtrEntry(rmode)) {
+ count_of_code_ptr_++;
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ count_of_heap_ptr_++;
+ }
+ }
+
+ // Check if we still have room for another entry given ARM's ldr and vldr
+ // immediate offset range.  The pool is full once either the whole pool is
+ // beyond ldr's reach or the 64-bit section is beyond vldr's reach.
+ if (!is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
+ count_of_code_ptr_,
+ count_of_heap_ptr_,
+ count_of_32bit_)) ||
+ !is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
+ assm->set_constant_pool_full();
+ }
+}
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
+ rinfo != entries_.end(); rinfo++) {
+ ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
+ rinfo->set_pc(rinfo->pc() + pc_delta);
+ }
+}
+
+
+MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+ if (IsEmpty()) {
+ return heap->empty_constant_pool_array();
+ } else {
+ return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
+ count_of_heap_ptr_, count_of_32bit_);
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
+ ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
+ ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
+ ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
+ ASSERT(entries_.size() == merged_indexes_.size());
+
+ int index_64bit = 0;
+ int index_code_ptr = count_of_64bit_;
+ int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
+ int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
+
+ size_t i;
+ std::vector<RelocInfo>::const_iterator rinfo;
+ for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (merged_indexes_[i] == -1) {
+ if (Is64BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
+ constant_pool->set(index_64bit++, rinfo->data64());
+ } else if (Is32BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
+ constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
+ } else if (IsCodePtrEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_code_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_heap_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ }
+ merged_indexes_[i] = offset; // Stash offset for merged entries.
+ } else {
+ size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
+ ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
+ offset = merged_indexes_[merged_index];
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo->pc());
+ if (Is64BitEntry(rmode)) {
+ // Instruction to patch must be 'vldr dd, [pp, #0]'.
+ ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint10(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint12(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+
+ ASSERT((index_64bit == count_of_64bit_) &&
+ (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
+ (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
+ (index_32bit == (index_heap_ptr + count_of_32bit_)));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
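The builder above runs in three phases: entries are recorded while code is assembled, a ConstantPoolArray of matching shape is allocated once assembly finishes, and Populate() writes the values and back-patches every ldr/vldr that was emitted with a zero offset. A rough sketch of that sequence under FLAG_enable_ool_constant_pool; the wrapper function is hypothetical and the error handling is only schematic:

  // Sketch of the AllocateConstantPool() / PopulateConstantPool() lifecycle.
  MaybeObject* FinishConstantPool(Assembler* assm, Heap* heap) {
    // 1. During assembly, ConstantPoolAddEntry() forwarded each RelocInfo to
    //    constant_pool_builder_.AddEntry(), updating the per-type counts.
    // 2. Allocate a pool sized by the accumulated 64-bit / code-ptr /
    //    heap-ptr / 32-bit counts.
    ConstantPoolArray* pool;
    MaybeObject* maybe_pool = assm->AllocateConstantPool(heap);
    if (!maybe_pool->To(&pool)) return maybe_pool;
    // 3. Fill in the entries and patch the ldr/vldr immediate offsets.
    assm->PopulateConstantPool(pool);
    return pool;
  }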
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index ccb510420..727b05421 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -39,7 +39,10 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
+#include <vector>
+
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
@@ -376,8 +379,9 @@ struct QwNeonRegister {
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ int encoded_code = code_ << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
}
int code_;
@@ -702,9 +706,42 @@ class NeonListOperand BASE_EMBEDDED {
NeonListType type_;
};
+
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ explicit ConstantPoolBuilder();
+ void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ MaybeObject* Allocate(Heap* heap);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline int count_of_64bit() const { return count_of_64bit_; }
+ inline int count_of_code_ptr() const { return count_of_code_ptr_; }
+ inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
+ inline int count_of_32bit() const { return count_of_32bit_; }
+
+ private:
+ bool Is64BitEntry(RelocInfo::Mode rmode);
+ bool Is32BitEntry(RelocInfo::Mode rmode);
+ bool IsCodePtrEntry(RelocInfo::Mode rmode);
+ bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+
+ std::vector<RelocInfo> entries_;
+ std::vector<int> merged_indexes_;
+ int count_of_64bit_;
+ int count_of_code_ptr_;
+ int count_of_heap_ptr_;
+ int count_of_32bit_;
+};
+
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+extern const Instr kLdrPpMask;
+extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@@ -780,9 +817,27 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool));
+
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -795,7 +850,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
+ Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@@ -1292,12 +1347,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
- }
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1401,6 +1450,8 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1446,6 +1497,20 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ bool can_use_constant_pool() const {
+ return is_constant_pool_available() && !constant_pool_full_;
+ }
+
+ void set_constant_pool_full() {
+ constant_pool_full_ = true;
+ }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1499,6 +1564,14 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool is_constant_pool_available() const {
+ return constant_pool_available_;
+ }
+
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
@@ -1556,19 +1629,27 @@ class Assembler : public AssemblerBase {
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
+ ConstantPoolBuilder constant_pool_builder_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool constant_pool_available_;
+ // Indicates whether the constant pool is too full to accept new entries due
+ // to the ldr instruction's limited immediate offset range.
+ bool constant_pool_full_;
+
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
+ void move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
@@ -1588,14 +1669,15 @@ class Assembler : public AssemblerBase {
};
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
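For call sites outside the assembler, the Code*-taking overloads above are the convenient form: they look up the code object's constant pool and forward to the ConstantPoolArray* variants. A hedged example of migrating an older one-argument caller (old_pc and host_code are placeholder names):

  // Before this patch: Assembler::set_target_address_at(old_pc, new_target);
  // The target may now live in an out-of-line pool, so the owning code object
  // (or its ConstantPoolArray) has to be supplied:
  Assembler::set_target_address_at(old_pc, host_code, new_target);
  // ...which is equivalent to:
  Assembler::set_target_address_at(
      old_pc, host_code != NULL ? host_code->constant_pool() : NULL,
      new_target);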
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 7898086c0..f13814641 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -155,10 +155,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -262,7 +259,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -282,7 +279,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -292,7 +289,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -329,7 +326,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -339,10 +336,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -350,11 +349,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ push(r2);
+ }
// Preserve the two incoming parameters on the stack.
__ SmiTag(r0);
@@ -405,7 +415,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
@@ -417,13 +427,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size
+ // r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -437,12 +451,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words)
+ // r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
if (count_constructions) {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@@ -456,9 +471,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+ } else if (create_memento) {
+ __ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // Fill in memento fields.
+ // r5: points to the allocated but uninitialized memento.
+ __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ } else {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
}
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -556,13 +590,47 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
__ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(r4, r0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
+
+ if (create_memento) {
+ __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, r5);
+ __ b(eq, &count_incremented);
+ // r2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ldr(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
+ __ str(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ push(r4);
__ push(r4);
@@ -665,17 +733,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -738,9 +806,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -768,13 +834,13 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -782,7 +848,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
@@ -870,14 +936,14 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -899,11 +965,11 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -947,7 +1013,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@@ -963,20 +1029,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
}
@@ -987,8 +1059,8 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1039,7 +1111,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1062,7 +1134,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0);
__ push(r0);
@@ -1189,7 +1261,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1247,7 +1319,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
@@ -1354,8 +1426,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+
+ if (FLAG_enable_ool_constant_pool) {
+ __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
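Most frames opened in this file now use FrameAndConstantPoolScope rather than a bare FrameScope, so the frame marker and, when FLAG_enable_ool_constant_pool is set, the pp register are saved and restored together. A minimal sketch of the recurring pattern, modelled on CallRuntimePassFunction above; the helper name is hypothetical:

  static void CallRuntimeWithFunction(MacroAssembler* masm,
                                      Runtime::FunctionId id) {
    // Marks the frame as INTERNAL and keeps pp valid across the runtime call,
    // which may trigger GC or lazy deoptimization.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ push(r1);  // Preserve the function across the call.
    __ push(r1);  // And pass it as the single runtime argument.
    __ CallRuntime(id, 1);
    __ pop(r1);   // Restore the function.
  }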
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 44de7aabc..832296b27 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -45,7 +45,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -76,7 +76,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -87,7 +87,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -98,15 +99,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r2, r3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -141,7 +142,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -165,6 +166,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -226,7 +247,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -254,7 +275,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -490,7 +511,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
r0.is(descriptor->register_params_[param_count - 1]));
// Push arguments
@@ -602,6 +623,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
@@ -1480,22 +1502,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -1554,9 +1563,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
{
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
+ __ add(lr, pc, Operand(4));
__ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ __ Call(r5);
}
__ VFPEnsureFPSCRState(r2);
@@ -1593,26 +1602,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ b(eq, &retry);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Clear the pending exception.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
+ __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex);
+ __ cmp(r0, r3);
__ b(eq, throw_termination_exception);
// Handle normal exception.
@@ -1644,7 +1648,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Set up argc and the builtin function in callee-saved registers.
@@ -1657,13 +1661,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -1671,7 +1673,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -1681,29 +1682,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, r0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0, 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
@@ -1755,7 +1741,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Isolate* isolate = masm->isolate();
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
if (FLAG_enable_ool_constant_pool) {
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@@ -1843,16 +1829,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+ // Branch and link to JSEntryTrampoline.
+ __ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -1897,8 +1877,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offset to the inline site to patch is passed in r5.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
@@ -1957,14 +1936,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
+ // The offset was stored in r5
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register offset = r5;
+ __ sub(inline_site, lr, offset);
+ // Get the map location in r5 and patch it.
+ __ GetRelocatedValueLocation(inline_site, offset);
+ __ ldr(offset, MemOperand(offset));
+ __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -2057,7 +2036,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -2099,108 +2078,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return r0; }
@@ -2258,7 +2135,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2278,11 +2155,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ str(r3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2336,7 +2213,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+ __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
@@ -2345,7 +2222,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2381,7 +2258,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
@@ -2396,7 +2273,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(r3, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
@@ -2426,7 +2303,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kArgumentsObjectSize (tagged)
+ // object + Heap::kSloppyArgumentsObjectSize (tagged)
// r0 = temporary scratch (a.o., for address calculation)
// r5 = the hole value
__ jmp(&parameters_test);
@@ -2444,7 +2321,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged)
- __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+ __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
@@ -2482,7 +2359,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -2517,7 +2394,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ b(eq, &add_arguments_object);
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(r1, r0, r2, r3, &runtime,
@@ -2527,7 +2404,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
@@ -2548,7 +2425,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
@@ -2576,7 +2453,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -2585,7 +2462,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2960,7 +2837,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3004,82 +2881,97 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : Feedback vector
+ // r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
__ b(eq, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- __ ldr(r5, FieldMemOperand(r3, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in r4.
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have it and store it in the
+ // slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ Push(r3, r2, r1, r0);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(r0);
- __ Push(r2, r1, r0);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ Pop(r3, r2, r1, r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
- __ Pop(r2, r1, r0);
- __ SmiUntag(r0);
+ __ bind(&not_array_function);
}
- __ b(&done);
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
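
The rewritten GenerateRecordCallTarget indexes the feedback vector with a Smi-encoded slot, scaling it to a byte offset via Operand::PointerOffsetFromSmiKey before loading at FixedArray::kHeaderSize. The sketch below models that address arithmetic; it is an illustration added for this write-up, not code from the commit, and it assumes a 32-bit build (Smis tagged as value << 1, 4-byte pointers, a two-word FixedArray header, heap-object tag 1).

// Illustrative sketch only; constants assume a 32-bit V8 build.
#include <cstdint>

const int32_t kPointerSize   = 4;
const int32_t kHeaderSize    = 2 * kPointerSize;  // FixedArray map + length
const int32_t kHeapObjectTag = 1;

// Byte offset from the tagged feedback-vector pointer to the given slot,
// mirroring r4 = r2 + PointerOffsetFromSmiKey(r3) followed by the
// FieldMemOperand(r4, FixedArray::kHeaderSize) access in the stub.
int32_t FeedbackSlotOffset(int32_t slot_as_smi) {
  int32_t scaled = slot_as_smi * (kPointerSize / 2);  // Smi carries value << 1
  return scaled + kHeaderSize - kHeapObjectTag;
}

Slot 0 therefore sits immediately after the map and length words, which is why both the megamorphic transition and the final monomorphic store can address the entry as FixedArray::kHeaderSize past the scaled base.
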
@@ -3087,7 +2979,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3096,11 +2990,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_function);
// Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in r2, we need
+ // to set r2 to undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
}
@@ -3122,7 +3020,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &cont);
}
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
if (NeedsChecks()) {
@@ -3143,14 +3041,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ // object (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
}
// Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
@@ -3176,7 +3075,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (CallAsMethod()) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r1, r3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r1);
@@ -3190,21 +3089,42 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r3 + 1.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r3;
+ Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3212,10 +3132,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3290,7 +3210,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3312,7 +3232,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3760,7 +3680,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3918,7 +3838,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4405,7 +4325,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4459,7 +4379,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
@@ -4824,7 +4744,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4837,13 +4757,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -4857,18 +4777,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5175,7 +5087,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
if (FLAG_debug_code) {
@@ -5283,44 +5195,31 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : type info cell
+ // -- r2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(cell_map));
- __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in r2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r2, r4);
}
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ ldr(r4, FieldMemOperand(r2, 0));
- __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
@@ -5429,7 +5328,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5478,7 +5377,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
const int kApiStackSpace = 4;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
@@ -5507,15 +5406,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first js argument
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
@@ -5533,7 +5437,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create PropertyAccessorInfo instance on the stack above the exit frame with
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 7a371f169..ef78802be 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -324,7 +324,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 78bb66c49..14f4705cb 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -343,7 +343,7 @@ enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
- Neon64 = 0x4
+ Neon64 = 0x3
};
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index efd11069b..12258ccad 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -117,7 +117,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 6031499db..ef3ea275c 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -50,13 +50,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -350,6 +373,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 49e4126b3..aa8ee22b7 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1061,7 +1061,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb16'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
@@ -1085,7 +1085,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
@@ -1100,7 +1100,7 @@ void Decoder::DecodeType3(Instruction* instr) {
} else {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
@@ -1566,7 +1566,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1579,7 +1580,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 813e9492d..b5ec2d5fd 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -111,6 +111,25 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ Register stack_limit_scratch,
+ int pointers = 0,
+ Register scratch = sp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(sp) == (pointers == 0));
+ if (pointers != 0) {
+ __ sub(scratch, sp, Operand(pointers * kPointerSize));
+ }
+ __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+ __ cmp(scratch, Operand(stack_limit_scratch));
+ __ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
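
The new EmitStackCheck helper covers both forms of the stack-limit check used in this file: with pointers == 0 it compares sp against the Heap::kStackLimit root directly, and with a non-zero count it first subtracts the space about to be reserved into the spare scratch register. The snippet below is only a model of the branch the helper emits (it is not part of the commit) and assumes a 32-bit ARM build:

#include <cstdint>

// Models the b(hs, &ok) emitted by EmitStackCheck: the StackCheck builtin is
// reached only when the probed address falls below the stack limit.
// 'pointers' is the number of words about to be pushed (0 for the plain check).
bool WouldCallStackCheckBuiltin(uintptr_t sp, uintptr_t stack_limit,
                                int pointers) {
  const uintptr_t kPointerSize = 4;  // 32-bit ARM assumed
  uintptr_t probe = sp - pointers * kPointerSize;
  return probe < stack_limit;  // hs (unsigned >=) skips the builtin call
}

In the prologue that follows, the pre-allocation form is emitted only when locals_count reaches 128, so small frames keep the single compare against sp.
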
@@ -130,6 +149,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -144,10 +166,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -170,27 +192,34 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, r2, locals_count, r9);
+ }
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ mov(r2, Operand(locals_count));
- __ bind(&loop);
- __ sub(r2, r2, Operand(1), SetCC);
- __ push(r9);
- __ b(&loop, ne);
- } else {
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
__ push(r9);
}
+ // Continue loop if not done.
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(&loop_header, ne);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(r9);
}
}
}
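
The rewritten locals initialization pushes the undefined root in batches: kMaxPushes is 4 under FLAG_optimize_for_size and 32 otherwise, whole batches run in a counted loop, and the remainder is emitted as straight-line pushes. A small sketch of that split, added here purely for illustration:

// Illustrative only: how locals_count divides under the new prologue loop.
struct PushPlan {
  int loop_iterations;  // times the kMaxPushes-push loop body executes
  int trailing_pushes;  // straight-line pushes emitted after the loop
};

PushPlan PlanLocalPushes(int locals_count, bool optimize_for_size) {
  const int kMaxPushes = optimize_for_size ? 4 : 32;
  PushPlan plan;
  plan.loop_iterations = locals_count / kMaxPushes;
  plan.trailing_pushes = locals_count % kMaxPushes;
  return plan;
}

For example, 70 locals without FLAG_optimize_for_size give two executions of the 32-push loop body plus 6 trailing pushes; counts below kMaxPushes skip the loop entirely.
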
@@ -205,13 +234,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in r0. It replaces the context passed to us.
@@ -261,12 +290,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -292,7 +321,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -301,13 +330,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_, ip);
}
{ Comment cmnt(masm_, "[ Body");
@@ -668,7 +691,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -789,7 +812,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -838,7 +861,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
__ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -894,7 +917,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -966,7 +989,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -974,7 +997,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1029,7 +1052,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1074,6 +1097,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1163,13 +1187,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1327,7 +1351,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1335,7 +1359,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(r0);
}
@@ -1357,7 +1381,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1370,7 +1394,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1413,7 +1437,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1451,17 +1475,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1478,7 +1501,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
@@ -1491,9 +1514,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1525,7 +1547,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1533,18 +1555,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@@ -1556,15 +1578,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1597,7 +1619,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(r5, r0);
__ bind(&materialized);
@@ -1609,7 +1631,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(r5);
__ bind(&allocated);
@@ -1649,12 +1671,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1692,7 +1713,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1805,7 +1826,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1865,13 +1886,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2010,7 +2027,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(sp, r1);
__ b(eq, &post_runtime);
__ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2076,7 +2093,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r0); // result
EmitReturnSequence();
@@ -2094,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(r1, MemOperand(sp, kPointerSize));
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2128,7 +2145,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2192,12 +2209,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_ool_constant_pool) {
+ // Load the new code object's constant pool pointer.
+ __ ldr(pp,
+ MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ }
+
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ }
__ bind(&slow_resume);
}
@@ -2213,7 +2239,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2228,14 +2254,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(r0);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2253,7 +2279,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2291,7 +2317,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2318,8 +2344,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2396,20 +2421,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2435,7 +2454,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2444,7 +2463,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ Pop(r0, r2); // r0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2455,41 +2474,59 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(r0); // Value.
+ __ mov(r1, Operand(name));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ str(result_register(), StackOperand(var), eq);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2499,23 +2536,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2523,21 +2556,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2555,7 +2574,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2569,10 +2588,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Pop(r2, r1); // r1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2599,12 +2618,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2624,7 +2641,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2716,15 +2733,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2744,15 +2761,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // r2: the language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
+ // r2: strict mode.
+ __ mov(r2, Operand(Smi::FromInt(strict_mode())));
  // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2768,8 +2785,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2826,7 +2843,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2905,10 +2922,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3380,7 +3404,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
@@ -3474,7 +3498,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(r0);
}
@@ -3843,7 +3867,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(r0);
@@ -4120,8 +4144,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4185,9 +4209,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ mov(r1, Operand(Smi::FromInt(strict_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4195,11 +4217,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4213,7 +4235,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(r0);
}
} else {
@@ -4288,16 +4310,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4411,9 +4428,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4442,7 +4457,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4455,10 +4470,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ Pop(r2, r1); // r1 = key. r2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4478,7 +4493,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4487,6 +4502,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,7 +4512,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4648,7 +4664,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4683,7 +4699,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
@@ -4839,7 +4855,18 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+static Address GetInterruptImmediateLoadAddress(Address pc) {
+ Address load_address = pc - 2 * Assembler::kInstrSize;
+ if (!FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+ load_address -= Assembler::kInstrSize;
+ ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+ }
+ return load_address;
+}
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4847,37 +4874,42 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
CodePatcher patcher(branch_address, 1);
-
switch (target_state) {
case INTERRUPT:
+ {
// <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
- // e1 2f ff 3c blx ip
+ // bpl ok
+ // ; load interrupt stub address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
- patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
- ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+
+      // Calculate branch offset to the ok-label - this is the difference between
+ // the branch address and |pc| (which points at <blx ip>) plus one instr.
+ int branch_offset = pc + kInstrSize - branch_address;
+ patcher.masm()->b(branch_offset, pl);
break;
+ }
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
+ // mov r0, r0 (NOP)
+ // ; load on-stack replacement address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
patcher.masm()->nop();
break;
}
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
// Replace the call address.
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
+ Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
+ replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
@@ -4891,34 +4923,26 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
- Address branch_address = pc - 3 * kInstrSize;
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
-
- if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->InterruptCheck()->entry()));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
+ Address interrupt_address = Assembler::target_address_at(
+ pc_immediate_load_address, unoptimized_code);
+
+ if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
+ ASSERT(interrupt_address ==
+ isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
- if (Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
+ if (interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
+ ASSERT(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index d324a8c6b..3d57105af 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -333,8 +333,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -342,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -430,7 +427,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ b(ne, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -492,7 +489,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -518,7 +515,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -879,7 +876,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1063,7 +1060,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1162,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1172,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1225,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fcd7d3c9a..55705b807 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -831,7 +831,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1110,6 +1109,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1151,6 +1151,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1242,21 +1249,62 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1265,97 +1313,106 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- // LMathFloorOfDiv can only handle a subset of divisors, so fall
- // back to a flooring division in all other cases.
- HValue* right = instr->right();
- if (!right->IsInteger32Constant() ||
- (!CpuFeatures::IsSupported(SUDIV) &&
- !HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(right);
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d10);
+ LOperand* temp2 = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d11);
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor, temp, temp2));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else if (CpuFeatures::IsSupported(SUDIV)) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- FixedTemp(d10),
- FixedTemp(d11));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
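
Note: the DoDivByPowerOf2I lowering introduced above emits only a shift-based quotient and leans on deoptimization for everything it cannot express directly: an inexact result when not every use truncates to int32, the kMinInt / -1 overflow, and a zero dividend with a negative divisor, which JavaScript observes as -0. A minimal scalar sketch of that shift-based truncating division (plain C++, illustrative only, not V8 code; the deopt cases are assumed to have been filtered out already):

    #include <cstdint>

    int32_t TruncatingDivByPowerOf2(int32_t dividend, int32_t divisor) {
      // |divisor| is a non-zero power of two; kMinInt / -1 and the -0 case
      // are assumed to have been handled by a deopt before reaching here.
      uint32_t abs_divisor = divisor < 0 ? 0u - static_cast<uint32_t>(divisor)
                                         : static_cast<uint32_t>(divisor);
      int shift = 0;
      while ((abs_divisor >> shift) != 1u) shift++;  // log2(|divisor|)
      // Bias negative dividends so the arithmetic shift rounds toward zero
      // instead of toward -infinity.
      int32_t bias = (dividend >> 31) & static_cast<int32_t>(abs_divisor - 1u);
      int32_t quotient = (dividend + bias) >> shift;
      return divisor < 0 ? -quotient : quotient;
    }
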
@@ -1846,25 +1903,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
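
The retagging path above now keys off HValue::kCanOverflow instead of explicit Smi range checks: when the int32 value provably fits in a Smi, a bare LSmiTag is enough, otherwise LNumberTagI / LNumberTagU (each now taking two temps) fall back to allocating a heap number. A rough illustration of why the flag is the right test, assuming the 32-bit layout of one tag bit and a 31-bit payload (sketch only, not V8 code):

    #include <cstdint>

    // Smi tagging on 32-bit targets shifts the value left by one and keeps
    // the low bit clear, so it overflows exactly when the int32 value needs
    // more than 31 bits.
    bool TrySmiTag(int32_t value, int32_t* tagged_out) {
      if (value < -(1 << 30) || value > (1 << 30) - 1) {
        return false;  // needs the heap-number path (LNumberTagI/LNumberTagU)
      }
      *tagged_out = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return true;
    }
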
@@ -1939,6 +1998,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2195,11 +2268,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2209,8 +2280,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
+ if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 29a176628..34eb51017 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -173,7 +180,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -614,12 +620,45 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL,
- LOperand* temp2 = NULL) {
+ LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -636,6 +675,42 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -648,29 +723,47 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -809,6 +902,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -1885,19 +1990,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1910,38 +2002,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2026,6 +2113,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2101,7 +2189,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2164,7 +2252,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2365,6 +2453,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2579,10 +2694,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2607,6 +2719,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2717,9 +2838,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index cfcc56da2..7152ba21c 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -147,11 +147,11 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -173,7 +173,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
}
// Reserve space for the stack slots needed by the code.
@@ -212,7 +211,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
@@ -270,6 +269,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -284,7 +286,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -433,7 +436,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ Move(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ ldr(scratch, ToMemOperand(op));
return scratch;
}
@@ -469,7 +472,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(op));
MemOperand mem_op = ToMemOperand(op);
@@ -689,10 +692,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -913,6 +912,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1113,36 +1120,70 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmp(dividend, Operand::Zero());
+ __ b(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ rsb(dividend, dividend, Operand::Zero());
+ __ and_(dividend, dividend, Operand(mask));
+ __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
+ }
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
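For reference, a minimal C++ sketch of the scalar arithmetic the new DoModByPowerOf2I sequence implements, assuming the divisor is plus or minus a power of two; the helper name is illustrative, and the wrapping negation mirrors the rsb instruction:

#include <cstdint>

int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  // mask = |divisor| - 1, written so that it is also correct for kMinInt.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // Negate via uint32_t to mimic the wrapping rsb instruction (plain
    // -dividend would overflow for kMinInt in C++).
    uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);
    return -static_cast<int32_t>(magnitude & static_cast<uint32_t>(mask));
  }
  return dividend & mask;
}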
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
- } else if (CpuFeatures::IsSupported(SUDIV)) {
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ smull(result, ip, result, ip);
+ __ sub(result, dividend, result, SetCC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ b(ne, &remainder_not_zero);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
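In scalar terms DoModByConstI computes dividend - (dividend / |divisor|) * |divisor|, with the quotient coming from the multiply-and-shift TruncatingDiv helper added in macro-assembler-arm.cc further below. A hedged sketch, using plain division in place of that helper and assuming the divisor is neither 0 nor kMinInt:

#include <cstdint>
#include <cstdlib>

int32_t ModByConst(int32_t dividend, int32_t divisor) {
  int32_t abs_divisor = std::abs(divisor);            // divisor != 0, != kMinInt
  // Plain division stands in for MacroAssembler::TruncatingDiv.
  int32_t quotient = dividend / abs_divisor;
  return dividend - quotient * abs_divisor;            // the smull/sub pair above
}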
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
Register left_reg = ToRegister(instr->left());
@@ -1152,14 +1193,14 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, sdiv might signal an exception. We have to deopt in this
// case because we can't return a NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
__ cmp(left_reg, Operand(kMinInt));
__ b(ne, &no_overflow_possible);
@@ -1182,9 +1223,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ mls(result_reg, result_reg, right_reg, left_reg);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
@@ -1211,7 +1250,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
@@ -1240,9 +1279,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
@@ -1252,165 +1289,94 @@ void LCodeGen::DoModI(LModI* instr) {
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
- uint32_t divisor_abs = abs(divisor);
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ tst(dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment());
+ }
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+}
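A sketch of the rounding logic above, assuming |divisor| == 1 << shift, that the kMinInt / -1 and negative-zero cases were already rejected by the deopt checks, and that >> of a negative int is an arithmetic shift as on ARM:

#include <cstdint>

int32_t DivByPowerOf2(int32_t dividend, int32_t divisor, int shift) {
  if (divisor == -1) return -dividend;  // the rsb shortcut above
  // Add |divisor| - 1 to a negative dividend so the arithmetic shift rounds
  // toward zero instead of toward minus infinity (the LSR of the sign bits).
  int32_t bias = 0;
  if (shift > 0) {
    bias = static_cast<int32_t>(
        static_cast<uint32_t>(dividend >> 31) >> (32 - shift));
  }
  int32_t result = (dividend + bias) >> shift;  // ASR on ARM
  return divisor < 0 ? -result : result;
}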
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
}
-}
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- const Register dividend = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- __ sub(result, dividend, Operand::Zero(), SetCC);
- __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- __ mov(result, Operand(result, ASR, power));
- if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
- return; // Don't fall through to "__ rsb" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result, Operand(dividend, ASR, power));
- if (divisor < 0) __ rsb(result, result, Operand(0));
- }
- } else {
- if (divisor < 0) {
- __ rsb(result, dividend, Operand(0));
- } else {
- __ Move(result, dividend);
- }
- }
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
- return;
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(ip, Operand(divisor));
+ __ smull(scratch0(), ip, result, ip);
+ __ sub(scratch0(), scratch0(), dividend, SetCC);
+ DeoptimizeIf(ne, instr->environment());
}
+}
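A compact sketch of DoDivByConstI's behaviour; plain division stands in for TruncatingDiv, and the final comparison corresponds to the smull/sub/DeoptimizeIf(ne) sequence that rejects inexact divisions:

#include <cstdint>
#include <cstdlib>

int32_t DivByConst(int32_t dividend, int32_t divisor, bool all_uses_truncate,
                   bool* deopt) {
  // dividend / |divisor| stands in for the multiply-and-shift TruncatingDiv;
  // divisor is assumed to be neither 0 nor kMinInt here.
  int32_t result = dividend / std::abs(divisor);
  if (divisor < 0) result = -result;
  // Unless every use truncates anyway, deoptimize when the division was not
  // exact, detected by re-multiplying the quotient.
  if (!all_uses_truncate && result * divisor != dividend) *deopt = true;
  return result;
}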
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register result = ToRegister(instr->result());
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive;
if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
       // Do the test only if it hadn't been done above.
@@ -1423,10 +1389,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) &&
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
(!CpuFeatures::IsSupported(SUDIV) ||
- !instr->hydrogen_value()->CheckFlag(
- HValue::kAllUsesTruncatingToInt32))) {
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
// We don't need to check for overflow when truncating with sdiv
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
@@ -1437,18 +1402,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(result, left, right);
-
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Compute remainder and deopt if it's not zero.
- const Register remainder = scratch0();
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
- }
} else {
- const DoubleRegister vleft = ToDoubleRegister(instr->temp());
- const DoubleRegister vright = double_scratch0();
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
__ vmov(double_scratch0().low(), left);
__ vcvt_f64_s32(vleft, double_scratch0().low());
__ vmov(double_scratch0().low(), right);
@@ -1456,15 +1412,23 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
__ vcvt_s32_f64(double_scratch0().low(), vleft);
__ vmov(result, double_scratch0().low());
+ }
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Deopt if exact conversion to integer was not possible.
- // Use vright as scratch register.
- __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
- __ VFPCompareAndSetFlags(vleft, double_scratch0());
- DeoptimizeIf(ne, instr->environment());
- }
+ if (hdiv->IsMathFloorOfDiv()) {
+ Label done;
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ b(eq, &done);
+ __ eor(remainder, remainder, Operand(right));
+ __ add(result, result, Operand(remainder, ASR, 31));
+ __ bind(&done);
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
}
}
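The IsMathFloorOfDiv correction above turns a truncating quotient into a flooring one by subtracting one when the remainder is nonzero and its sign differs from the divisor's. A small sketch of that adjustment (arithmetic shift assumed, as on ARM):

#include <cstdint>

int32_t FloorCorrect(int32_t truncated, int32_t remainder, int32_t right) {
  if (remainder == 0) return truncated;
  // (remainder ^ right) >> 31 is -1 when remainder and divisor have opposite
  // signs (the exact quotient was negative), and 0 otherwise.
  return truncated + ((remainder ^ right) >> 31);
}

So FloorCorrect(left / right, left % right, right) yields floor(left / right), e.g. FloorCorrect(-3, -1, 2) == -4 for left = -7, right = 2.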
@@ -1493,71 +1457,84 @@ void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
- if (!CpuFeatures::IsSupported(SUDIV)) {
- // If the CPU doesn't support sdiv instruction, we only optimize when we
- // have magic numbers for the divisor. The standard integer division routine
- // is usually slower than transitionning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(vs, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ } else {
+ __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+ __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
} else {
- CpuFeatureScope scope(masm(), SUDIV);
- const Register right = ToRegister(instr->right());
-
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ }
+}
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(left, Operand(kMinInt));
- __ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- __ bind(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ sub(result, result, Operand(1));
+ __ bind(&done);
}
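The general case above relies on the identity floor(n / d) == trunc((n + sign(d)) / d) - 1 whenever the exact quotient is negative. A sketch, ignoring the kMinInt overflow cases that the deopt checks handle:

#include <cassert>
#include <cstdint>

int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  assert(divisor != 0);
  // Same sign (or zero dividend): truncation already equals flooring.
  if ((dividend < 0) == (divisor < 0) || dividend == 0) {
    return dividend / divisor;
  }
  // Otherwise adjust before and after the truncating division, exactly as the
  // needs_adjustment path above does.
  return (dividend + (divisor > 0 ? 1 : -1)) / divisor - 1;
}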
@@ -1676,7 +1653,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1799,7 +1776,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1820,7 +1797,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1993,7 +1970,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -2742,9 +2719,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2802,9 +2776,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -2817,37 +2788,32 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
+ static const int kAdditionalDelta = 4;
   // Make sure that the code size is predictable, since we use specific constant
   // offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
- __ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
+ // Put the result value (r0) into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}
@@ -3225,7 +3191,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3573,7 +3539,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3664,7 +3630,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
@@ -3881,6 +3847,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ clz(result, input);
+}
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
@@ -3964,8 +3937,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ mov(r0, Operand(instr->arity()));
// No cell in r2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(r2, Operand(undefined_value));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3977,7 +3949,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(factory()->undefined_value()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -4057,12 +4029,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
DeoptimizeIf(eq, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4092,9 +4073,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4136,8 +4114,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4258,7 +4235,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4374,7 +4351,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
@@ -4486,7 +4463,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
@@ -4561,20 +4538,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4585,27 +4548,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ tst(ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4628,9 +4581,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4648,18 +4603,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
LowDwVfpRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4676,38 +4632,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ b(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, Operand::Zero());
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, dst);
+ }
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4756,11 +4714,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4769,8 +4727,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ tst(input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTag(output, input, SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(output, input);
+ }
}
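A sketch of the two checks DoSmiTag now performs: a Smi on 32-bit ARM stores a 31-bit signed payload shifted left by one, so tagging a uint32 fails when either of its top two bits is set (the 0xC0000000 test), and tagging a signed value fails when bits 30 and 31 disagree (the SetCC/vs path). Arithmetic right shift assumed:

#include <cstdint>
#include <optional>

std::optional<int32_t> SmiTagSigned(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  if ((tagged >> 1) != value) return std::nullopt;   // overflow -> deopt (vs)
  return tagged;
}

std::optional<int32_t> SmiTagUint32(uint32_t value) {
  if (value & 0xC0000000u) return std::nullopt;      // top two bits -> deopt
  return static_cast<int32_t>(value << 1);
}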
@@ -5220,6 +5191,26 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ VmovHigh(result_reg, value_reg);
+ } else {
+ __ VmovLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ __ VmovHigh(result_reg, hi_reg);
+ __ VmovLow(result_reg, lo_reg);
+}
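DoDoubleBits and DoConstructDouble simply move the two 32-bit halves of the IEEE-754 bit pattern between core and VFP registers. The scalar equivalent, for reference:

#include <cstdint>
#include <cstring>

uint32_t DoubleHighBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);          // VmovHigh
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}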
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5328,7 +5319,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5362,7 +5353,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ mov(r4, Operand(instr->hydrogen()->pattern()));
__ mov(r3, Operand(instr->hydrogen()->flags()));
__ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
__ bind(&materialized);
@@ -5375,7 +5366,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(r1);
__ bind(&allocated);
@@ -5390,7 +5381,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5399,7 +5390,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ mov(r1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5548,7 +5539,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5584,7 +5575,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5622,10 +5613,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index d58c18f6c..21da500d0 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -126,9 +126,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -162,9 +164,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -348,17 +348,6 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 77c514ff5..2bfe09f76 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -133,6 +133,12 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+  // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+#endif
+
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -153,7 +159,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@@ -888,6 +894,16 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
+ pc_offset() - Instruction::kPCReadOffset;
+ ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
+
+
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
PushFixedFrame();
@@ -912,22 +928,20 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
-}
-
-
-void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
- int constant_pool_offset =
- Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
- ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
- ldr(pp, MemOperand(pc, constant_pool_offset));
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
}
}
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool) {
// r0-r3: preserved
PushFixedFrame();
+ if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ }
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
@@ -975,6 +989,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
}
if (FLAG_enable_ool_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ LoadConstantPoolPointerRegister();
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -1045,6 +1060,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1059,7 +1076,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
-
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@@ -1366,6 +1382,11 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
+ }
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@@ -2411,7 +2432,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -2806,16 +2827,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2827,25 +2840,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -2899,31 +2911,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
@@ -2936,19 +2923,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -3070,6 +3044,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(eq, &done_checking);
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
@@ -3579,22 +3567,31 @@ void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
+ Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
+ if (FLAG_enable_ool_constant_pool) {
+ and_(result, result, Operand(kLdrPpPattern));
+ cmp(result, Operand(kLdrPpPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
+ } else {
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ }
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
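A sketch of the address computation above: the patched instruction is an ldr with a 12-bit immediate, taken relative to pp when the out-of-line constant pool is enabled, or relative to the ldr's own address plus the pc read-ahead (assumed to be 8, the value of Instruction::kPCReadOffset on ARM) otherwise:

#include <cstdint>

uint32_t ConstantAddressFromLdr(uint32_t ldr_address, uint32_t instruction,
                                uint32_t pool_pointer, bool ool_constant_pool) {
  const uint32_t kLdrOffsetMask = (1u << 12) - 1;     // 12-bit immediate
  uint32_t imm12 = instruction & kLdrOffsetMask;
  if (ool_constant_pool) return pool_pointer + imm12;  // ldr rX, [pp, #imm]
  return ldr_address + 8 + imm12;                      // ldr rX, [pc, #imm]
}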
@@ -3849,9 +3846,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -4079,6 +4076,26 @@ void CodePatcher::EmitCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(ip));
+ ASSERT(!result.is(ip));
+ MultiplierAndShift ms(divisor);
+ mov(ip, Operand(ms.multiplier()));
+ smull(ip, result, dividend, ip);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ add(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ sub(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+ add(result, result, Operand(dividend, LSR, 31));
+}
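TruncatingDiv is the classic multiply-and-shift division from Hacker's Delight; the (multiplier, shift) pair is assumed to come from MultiplierAndShift(divisor). A scalar sketch of exactly the instruction sequence above, with arithmetic right shifts assumed as on ARM:

#include <cstdint>

int32_t TruncatingDivRef(int32_t dividend, int32_t multiplier, int shift,
                         int32_t divisor) {
  // High 32 bits of the 64-bit product, as smull leaves in 'result'.
  int32_t high = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);
  if (divisor > 0 && multiplier < 0) high += dividend;
  if (divisor < 0 && multiplier > 0) high -= dividend;
  if (shift > 0) high >>= shift;
  // Add the dividend's sign bit to round toward zero.
  return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}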
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7861d42aa..6b6ecd32d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -540,9 +540,6 @@ class MacroAssembler: public Assembler {
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
- // Loads the constant pool pointer (pp) register.
- void LoadConstantPoolPointerRegister();
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -570,14 +567,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1162,6 +1152,10 @@ class MacroAssembler: public Assembler {
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1296,6 +1290,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1390,7 +1388,7 @@ class MacroAssembler: public Assembler {
}
// Activation support.
- void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@@ -1467,6 +1465,9 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
@@ -1516,6 +1517,70 @@ class CodePatcher {
};
+class FrameAndConstantPoolScope {
+ public:
+ FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm),
+ type_(type),
+ old_has_frame_(masm->has_frame()),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ masm->set_has_frame(true);
+ masm->set_constant_pool_available(true);
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ masm->EnterFrame(type, !old_constant_pool_available_);
+ }
+ }
+
+ ~FrameAndConstantPoolScope() {
+ masm_->LeaveFrame(type_);
+ masm_->set_has_frame(old_has_frame_);
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
+};
+
+
+// Class for scoping the unavailability of constant pool access.
+class ConstantPoolUnavailableScope {
+ public:
+ explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
+ : masm_(masm),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(false);
+ }
+ }
+ ~ConstantPoolUnavailableScope() {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+  bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
+};
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
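FrameAndConstantPoolScope is intended as a drop-in replacement for FrameScope on ARM: besides entering and leaving the frame, it records whether the out-of-line constant pool pointer was already available and marks it available for the dynamic extent of the scope. A hedged usage sketch; the helper name and register choices are illustrative, not part of this patch:

    // Illustrative helper, not from this patch.
    void GenerateSlowPath(MacroAssembler* masm) {
      // Entering the scope emits EnterFrame(StackFrame::INTERNAL) and, if the
      // constant pool pointer was not yet available, asks EnterFrame to load it.
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      masm->Push(r0);
      masm->CallRuntime(Runtime::kHiddenThrowMessage, 1);
      // Leaving the scope emits LeaveFrame and restores the previous
      // has_frame / constant-pool-available state of the MacroAssembler.
    }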
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index ac36687fc..8f7c1e8bb 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -796,6 +796,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -3466,7 +3470,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
@@ -3489,7 +3494,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
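The simulator fix above reflects that vmovl writes a NEON Q register: the Vd field in the encoding names a D register, and a Q register is an even/odd pair of D registers, so the Q index is half the encoded D number, with the D bit supplying bit 3. A small sketch of the corrected decode, with a worked example; the parameter names are illustrative:

    // Decode the vmovl destination as a Q-register index.
    int DecodeVmovlQd(int d_bit, int vd_field) {
      // An odd Vd cannot name a Q register; the simulator now rejects it.
      // Example: d_bit = 0, vd_field = 14  ->  q7 (the D14:D15 pair).
      return (d_bit << 3) | (vd_field >> 1);
    }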
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 0af5162e9..24d7fe58c 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -207,6 +207,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 694a4ed68..c595e4274 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -322,7 +322,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -351,60 +351,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -481,11 +427,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -559,15 +505,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -585,15 +531,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -643,11 +589,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -688,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -712,7 +658,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -783,13 +729,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
@@ -854,7 +801,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -878,9 +825,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1076,15 +1020,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1173,7 +1108,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -1260,24 +1195,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch3(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1285,20 +1202,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = r1;
- Register value = r0;
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1308,7 +1221,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ Push(receiver, value);
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1336,21 +1249,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1358,10 +1256,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1396,16 +1290,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return r0;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r1, r2, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r1, r2, r3, r4, r5 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r2, r1, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r2, r1, r3, r4, r5 };
return registers;
}
@@ -1424,7 +1323,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
@@ -1537,6 +1436,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
diff --git a/deps/v8/src/arm64/OWNERS b/deps/v8/src/arm64/OWNERS
new file mode 100644
index 000000000..906a5ce64
--- /dev/null
+++ b/deps/v8/src/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
new file mode 100644
index 000000000..b56e3ed2a
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -0,0 +1,1229 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
+
+#include "arm64/assembler-arm64.h"
+#include "cpu.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void RelocInfo::apply(intptr_t delta) {
+ UNIMPLEMENTED();
+}
+
+
+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+inline unsigned CPURegister::code() const {
+ ASSERT(IsValid());
+ return reg_code;
+}
+
+
+inline CPURegister::RegisterType CPURegister::type() const {
+ ASSERT(IsValidOrNone());
+ return reg_type;
+}
+
+
+inline RegList CPURegister::Bit() const {
+ ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+ return IsValid() ? 1UL << reg_code : 0;
+}
+
+
+inline unsigned CPURegister::SizeInBits() const {
+ ASSERT(IsValid());
+ return reg_size;
+}
+
+
+inline int CPURegister::SizeInBytes() const {
+ ASSERT(IsValid());
+ ASSERT(SizeInBits() % 8 == 0);
+ return reg_size / 8;
+}
+
+
+inline bool CPURegister::Is32Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 32;
+}
+
+
+inline bool CPURegister::Is64Bits() const {
+ ASSERT(IsValid());
+ return reg_size == 64;
+}
+
+
+inline bool CPURegister::IsValid() const {
+ if (IsValidRegister() || IsValidFPRegister()) {
+ ASSERT(!IsNone());
+ return true;
+ } else {
+ ASSERT(IsNone());
+ return false;
+ }
+}
+
+
+inline bool CPURegister::IsValidRegister() const {
+ return IsRegister() &&
+ ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
+ ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
+}
+
+
+inline bool CPURegister::IsValidFPRegister() const {
+ return IsFPRegister() &&
+ ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
+ (reg_code < kNumberOfFPRegisters);
+}
+
+
+inline bool CPURegister::IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ ASSERT((reg_type != kNoRegister) || (reg_code == 0));
+ ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+
+ return reg_type == kNoRegister;
+}
+
+
+inline bool CPURegister::Is(const CPURegister& other) const {
+ ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
+ (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsRegister() const {
+ return reg_type == kRegister;
+}
+
+
+inline bool CPURegister::IsFPRegister() const {
+ return reg_type == kFPRegister;
+}
+
+
+inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
+ return (reg_size == other.reg_size) && (reg_type == other.reg_type);
+}
+
+
+inline bool CPURegister::IsValidOrNone() const {
+ return IsValid() || IsNone();
+}
+
+
+inline bool CPURegister::IsZero() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kZeroRegCode);
+}
+
+
+inline bool CPURegister::IsSP() const {
+ ASSERT(IsValid());
+ return IsRegister() && (reg_code == kSPRegInternalCode);
+}
+
+
+inline void CPURegList::Combine(const CPURegList& other) {
+ ASSERT(IsValid());
+ ASSERT(other.type() == type_);
+ ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+}
+
+
+inline void CPURegList::Remove(const CPURegList& other) {
+ ASSERT(IsValid());
+ if (other.type() == type_) {
+ list_ &= ~other.list();
+ }
+}
+
+
+inline void CPURegList::Combine(const CPURegister& other) {
+ ASSERT(other.type() == type_);
+ ASSERT(other.SizeInBits() == size_);
+ Combine(other.code());
+}
+
+
+inline void CPURegList::Remove(const CPURegister& other1,
+ const CPURegister& other2,
+ const CPURegister& other3,
+ const CPURegister& other4) {
+ if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
+ if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
+ if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
+ if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
+}
+
+
+inline void CPURegList::Combine(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ |= (1UL << code);
+}
+
+
+inline void CPURegList::Remove(int code) {
+ ASSERT(IsValid());
+ ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+ list_ &= ~(1UL << code);
+}
+
+
+inline Register Register::XRegFromCode(unsigned code) {
+ // This function returns the zero register when code = 31. The stack pointer
+ // can not be returned.
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kXRegSizeInBits);
+}
+
+
+inline Register Register::WRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfRegisters);
+ return Register::Create(code, kWRegSizeInBits);
+}
+
+
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kSRegSizeInBits);
+}
+
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return FPRegister::Create(code, kDRegSizeInBits);
+}
+
+
+inline Register CPURegister::W() const {
+ ASSERT(IsValidRegister());
+ return Register::WRegFromCode(reg_code);
+}
+
+
+inline Register CPURegister::X() const {
+ ASSERT(IsValidRegister());
+ return Register::XRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::S() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::SRegFromCode(reg_code);
+}
+
+
+inline FPRegister CPURegister::D() const {
+ ASSERT(IsValidFPRegister());
+ return FPRegister::DRegFromCode(reg_code);
+}
+
+
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : reg_(NoReg) {
+ initialize_handle(value);
+}
+
+
+// Default initializer is for int types
+template<typename int_t>
+struct OperandInitializer {
+ static const bool kIsIntType = true;
+ static inline RelocInfo::Mode rmode_for(int_t) {
+ return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ }
+ static inline int64_t immediate_for(int_t t) {
+ STATIC_ASSERT(sizeof(int_t) <= 8);
+ return t;
+ }
+};
+
+
+template<>
+struct OperandInitializer<Smi*> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(Smi* t) {
+ return RelocInfo::NONE64;
+ }
+  static inline int64_t immediate_for(Smi* t) {
+ return reinterpret_cast<int64_t>(t);
+ }
+};
+
+
+template<>
+struct OperandInitializer<ExternalReference> {
+ static const bool kIsIntType = false;
+ static inline RelocInfo::Mode rmode_for(ExternalReference t) {
+ return RelocInfo::EXTERNAL_REFERENCE;
+ }
+  static inline int64_t immediate_for(ExternalReference t) {
+ return reinterpret_cast<int64_t>(t.address());
+ }
+};
+
+
+template<typename T>
+Operand::Operand(T t)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(OperandInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(OperandInitializer<T>::immediate_for(t)),
+ reg_(NoReg),
+ rmode_(rmode) {
+ STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
+}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+ ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+ ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount),
+ rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ ASSERT(reg.IsValid());
+ ASSERT(shift_amount <= 4);
+ ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ ASSERT(IsShiftedRegister());
+ ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+int64_t Operand::immediate() const {
+ ASSERT(IsImmediate());
+ return immediate_;
+}
+
+
+Register Operand::reg() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+}
+
+
+Shift Operand::shift() const {
+ ASSERT(IsShiftedRegister());
+ return shift_;
+}
+
+
+Extend Operand::extend() const {
+ ASSERT(IsExtendedRegister());
+ return extend_;
+}
+
+
+unsigned Operand::shift_amount() const {
+ ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+}
+
+
+Operand Operand::UntagSmi(Register smi) {
+ ASSERT(smi.Is64Bits());
+ return Operand(smi, ASR, kSmiShift);
+}
+
+
+Operand Operand::UntagSmiAndScale(Register smi, int scale) {
+ ASSERT(smi.Is64Bits());
+ ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+ if (scale > kSmiShift) {
+ return Operand(smi, LSL, scale - kSmiShift);
+ } else if (scale < kSmiShift) {
+ return Operand(smi, ASR, kSmiShift - scale);
+ }
+ return Operand(smi);
+}
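UntagSmiAndScale folds the smi untag and a subsequent scale into a single shifted-register operand. Assuming the usual ARM64 layout where kSmiShift is 32 (the smi payload lives in the upper word), untagging and scaling by 2^scale collapses to one net shift, as in this small sketch:

    // Worked example: scale an untagged smi by 8 (scale == kPointerSizeLog2 == 3).
    int64_t tagged = int64_t{42} << 32;   // the smi 42, tagged
    int64_t scaled = tagged >> 29;        // ASR by kSmiShift - scale == 32 - 3; == 336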
+
+
+MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(!regoffset.IsSP());
+ ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+ ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), addrmode_(addrmode) {
+ ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+
+ regoffset_ = NoReg;
+ } else if (offset.IsShiftedRegister()) {
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+    shift_ = offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ ASSERT(shift_ == LSL);
+ } else {
+ ASSERT(offset.IsExtendedRegister());
+ ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+    shift_ = NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ ASSERT(!regoffset_.IsSP());
+ ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+Operand MemOperand::OffsetAsOperand() const {
+ if (IsImmediateOffset()) {
+ return offset();
+ } else {
+ ASSERT(IsRegisterOffset());
+ if (extend() == NO_EXTEND) {
+ return Operand(regoffset(), shift(), shift_amount());
+ } else {
+ return Operand(regoffset(), extend(), shift_amount());
+ }
+ }
+}
+
+
+void Assembler::Unreachable() {
+#ifdef USE_SIMULATOR
+ debug("UNREACHABLE", __LINE__, BREAK);
+#else
+ // Crash by branching to 0. lr now points near the fault.
+ Emit(BLR | Rn(xzr));
+#endif
+}
+
+
+Address Assembler::target_pointer_address_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ ASSERT(instr->IsLdrLiteralX());
+ return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
+}
+
+
+// Read/Modify the code target address in the branch/call instruction at pc.
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+ // Call sequence on ARM64 is:
+ // ldr ip0, #... @ load from literal pool
+ // blr ip0
+ Address candidate = pc - 2 * kInstructionSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(candidate);
+ USE(instr);
+ ASSERT(instr->IsLdrLiteralX());
+ return candidate;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+ // The call, generated by MacroAssembler::Call, is one of two possible
+ // sequences:
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ //
+ // The return address is immediately after the blr instruction in both cases,
+ // so it can be found by adding the call size to the address at the start of
+ // the call sequence.
+ STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
+ STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsMovz()) {
+ // Verify the instruction sequence.
+ ASSERT(instr->following(1)->IsMovk());
+ ASSERT(instr->following(2)->IsMovk());
+ ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithoutRelocation;
+ } else {
+ // Verify the instruction sequence.
+ ASSERT(instr->IsLdrLiteralX());
+ ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+ return pc + Assembler::kCallSizeWithRelocation;
+ }
+}
+
+
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+  // However, on ARM64, no instruction is actually patched in the case of
+ // embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged, a flush is not required.
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+}
+
+
+int RelocInfo::target_address_size() {
+ return kPointerSize;
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ return Assembler::target_pointer_address_at(pc_);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ ASSERT(!target->IsConsString());
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
+ if (mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL &&
+ target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
+}
+
+
+Address RelocInfo::target_reference() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode mode) {
+ ASSERT(IsRuntimeEntry(rmode_));
+ if (target_address() != target) set_target_address(target, mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+ UNIMPLEMENTED();
+ Cell *null_cell = NULL;
+ return Handle<Cell>(null_cell);
+}
+
+
+Cell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::CELL);
+ return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
+ UNIMPLEMENTED();
+}
+
+
+static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
+static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on ARM64.
+ return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Read the stub entry point from the code age sequence.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(!Code::IsYoungSequence(pc_));
+ // Overwrite the stub entry point in the code age sequence. This is loaded as
+ // a literal so there is no need to call FlushICache here.
+ Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
+ Memory::Address_at(stub_entry_address) = stub->instruction_start();
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // For the above sequences the Relocinfo points to the load literal loading
+ // the call address.
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ Assembler::set_target_address_at(pc_, host_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
+
+
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // The sequence must be:
+ // ldr ip0, [pc, #offset]
+ // blr ip0
+ // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
+ Instruction* i2 = i1->following();
+ return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
+ i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ visitor->VisitEmbeddedPointer(this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::CELL) {
+ visitor->VisitCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ isolate->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::CELL) {
+ StaticVisitor::VisitCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (RelocInfo::IsRuntimeEntry(mode)) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d : LDR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDP_x : LDP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDP_d : LDP_s;
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STR_d : STR_s;
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STP_d : STP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDNP_x : LDNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDNP_d : LDNP_s;
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? STNP_d : STNP_s;
+ }
+}
+
+
+int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
+ ASSERT(kStartOfLabelLinkChain == 0);
+ int offset = LinkAndGetByteOffsetTo(label);
+ ASSERT(IsAligned(offset, kInstructionSize));
+ return offset >> kInstructionSizeLog2;
+}
+
+
+Instr Assembler::Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+Instr Assembler::Cond(Condition cond) {
+ return cond << Condition_offset;
+}
+
+
+Instr Assembler::ImmPCRelAddress(int imm21) {
+ CHECK(is_int21(imm21));
+ Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+}
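ImmPCRelAddress scatters the 21-bit PC-relative immediate used by adr into the two fields of the instruction: the low two bits (immlo) and the remaining nineteen bits (immhi). A minimal sketch of that split, with illustrative names and the standard A64 field widths assumed:

    // Split a 21-bit signed PC-relative offset into the adr immlo/immhi pieces.
    void SplitPCRel(int32_t imm21, uint32_t* immlo, uint32_t* immhi) {
      uint32_t imm = static_cast<uint32_t>(imm21) & 0x1fffff;  // truncate to 21 bits
      *immlo = imm & 0x3;   // low 2 bits
      *immhi = imm >> 2;    // high 19 bits
    }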
+
+
+Instr Assembler::ImmUncondBranch(int imm26) {
+ CHECK(is_int26(imm26));
+ return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+}
+
+
+Instr Assembler::ImmCondBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCondBranch_offset;
+}
+
+
+Instr Assembler::ImmCmpBranch(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranch(int imm14) {
+ CHECK(is_int14(imm14));
+ return truncate_to_int14(imm14) << ImmTestBranch_offset;
+}
+
+
+Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
+ ASSERT(is_uint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+}
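ImmTestBranchBit performs a similar split for tbz/tbnz: the tested bit position is six bits wide, with bit 5 going to the b5 field and bits 4:0 to the b40 field. A short worked example of the two pieces before they are shifted to their field offsets:

    unsigned bit_pos = 37;               // test bit 37 of an X register (0b100101)
    unsigned b5  = (bit_pos >> 5) & 1;   // 1 -> b5 field
    unsigned b40 = bit_pos & 0x1f;       // 5 -> b40 field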
+
+
+Instr Assembler::SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+}
+
+
+Instr Assembler::ImmAddSub(int64_t imm) {
+ ASSERT(IsImmAddSub(imm));
+ if (is_uint12(imm)) { // No shift required.
+ return imm << ImmAddSub_offset;
+ } else {
+ return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+}
+
+
+Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+}
+
+
+Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ ASSERT(is_uint6(immr));
+ return immr << ImmR_offset;
+}
+
+
+Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(is_uint6(imms));
+ ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+}
+
+
+Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+ ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+}
+
+
+Instr Assembler::ImmLLiteral(int imm19) {
+ CHECK(is_int19(imm19));
+ return truncate_to_int19(imm19) << ImmLLiteral_offset;
+}
+
+
+Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+}
+
+
+Instr Assembler::ShiftDP(Shift shift) {
+ ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+}
+
+
+Instr Assembler::ImmDPShift(unsigned amount) {
+ ASSERT(is_uint6(amount));
+ return amount << ImmDPShift_offset;
+}
+
+
+Instr Assembler::ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+}
+
+
+Instr Assembler::ImmExtendShift(unsigned left_shift) {
+ ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+}
+
+
+Instr Assembler::ImmCondCmp(unsigned imm) {
+ ASSERT(is_uint5(imm));
+ return imm << ImmCondCmp_offset;
+}
+
+
+Instr Assembler::Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+}
+
+
+Instr Assembler::ImmLSUnsigned(int imm12) {
+ ASSERT(is_uint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+}
+
+
+Instr Assembler::ImmLS(int imm9) {
+ ASSERT(is_int9(imm9));
+ return truncate_to_int9(imm9) << ImmLS_offset;
+}
+
+
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+ ASSERT(((imm7 >> size) << size) == imm7);
+ int scaled_imm7 = imm7 >> size;
+ ASSERT(is_int7(scaled_imm7));
+ return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+}
+
+
+Instr Assembler::ImmShiftLS(unsigned shift_amount) {
+ ASSERT(is_uint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+}
+
+
+Instr Assembler::ImmException(int imm16) {
+ ASSERT(is_uint16(imm16));
+ return imm16 << ImmException_offset;
+}
+
+
+Instr Assembler::ImmSystemRegister(int imm15) {
+ ASSERT(is_uint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+}
+
+
+Instr Assembler::ImmHint(int imm7) {
+ ASSERT(is_uint7(imm7));
+ return imm7 << ImmHint_offset;
+}
+
+
+Instr Assembler::ImmBarrierDomain(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+}
+
+
+Instr Assembler::ImmBarrierType(int imm2) {
+ ASSERT(is_uint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+}
+
+
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+ ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+ return static_cast<LSDataSize>(op >> SizeLS_offset);
+}
+
+
+Instr Assembler::ImmMoveWide(uint64_t imm) {
+ ASSERT(is_uint16(imm));
+ return imm << ImmMoveWide_offset;
+}
+
+
+Instr Assembler::ShiftMoveWide(int64_t shift) {
+ ASSERT(is_uint2(shift));
+ return shift << ShiftMoveWide_offset;
+}
+
+
+Instr Assembler::FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+}
+
+
+Instr Assembler::FPScale(unsigned scale) {
+ ASSERT(is_uint6(scale));
+ return scale << FPScale_offset;
+}
+
+
+const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
+ return reg.Is64Bits() ? xzr : wzr;
+}
+
+
+void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
+ LoadRelocatedValue(rt, operand, LDR_x_lit);
+}
+
+
+inline void Assembler::CheckBuffer() {
+ ASSERT(pc_ < (buffer_ + buffer_size_));
+ if (buffer_space() < kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_veneer_pool_check_) {
+ CheckVeneerPool(false, true);
+ }
+ if (pc_offset() >= next_constant_pool_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+TypeFeedbackId Assembler::RecordedAstId() {
+ ASSERT(!recorded_ast_id_.IsNone());
+ return recorded_ast_id_;
+}
+
+
+void Assembler::ClearRecordedAstId() {
+ recorded_ast_id_ = TypeFeedbackId::None();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
new file mode 100644
index 000000000..8bee92ccc
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -0,0 +1,2813 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_REG_STATICS
+
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// CpuFeatures utilities (for V8 compatibility).
+
+ExternalReference ExternalReference::cpu_features() {
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+// -----------------------------------------------------------------------------
+// CPURegList utilities.
+
+CPURegister CPURegList::PopLowestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_, kRegListSizeInBits);
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_, kRegListSizeInBits);
+ index = kRegListSizeInBits - 1 - index;
+ ASSERT((1 << index) & list_);
+ Remove(index);
+ return CPURegister::Create(index, size_, type_);
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kFPRegister) {
+ Remove(GetCalleeSavedFP(RegisterSizeInBits()));
+ } else {
+ ASSERT(type() == CPURegister::kNoRegister);
+ ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+ return CPURegList(CPURegister::kFPRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ list.Combine(lr);
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
+ return list;
+}
+
+
+// This function defines the list of registers which are associated with a
+// safepoint slot. Safepoint register slots are saved contiguously on the stack.
+// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
+// code to index in the safepoint register slots. Any change here can affect
+// this mapping.
+CPURegList CPURegList::GetSafepointSavedRegisters() {
+ CPURegList list = CPURegList::GetCalleeSaved();
+ list.Combine(
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
+
+ // Note that unfortunately we can't use symbolic names for registers and have
+ // to directly use register codes. This is because this function is used to
+ // initialize some static variables and we can't rely on register variables
+ // to be initialized due to static initialization order issues in C++.
+
+ // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
+ // preserved outside of the macro assembler.
+ list.Remove(16);
+ list.Remove(17);
+
+ // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
+ // is a caller-saved register according to the procedure call standard.
+ list.Combine(18);
+
+ // Drop jssp as the stack pointer doesn't need to be included.
+ list.Remove(28);
+
+ // Add the link register (x30) to the safepoint list.
+ list.Combine(30);
+
+ return list;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on ARM64 means that it is a movz/movk sequence. We don't
+ // generate those for relocatable pointers.
+ return false;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ return instr->IsLdrLiteralX();
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ UNIMPLEMENTED();
+}
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ CPURegList regs(reg1, reg2, reg3, reg4);
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs.IncludesAliasOf(candidate)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return NoReg;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs =
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ int number_of_unique_fpregs =
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+
+ ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+void Operand::initialize_handle(Handle<Object> handle) {
+ AllowDeferredHandleDereference using_raw_address;
+
+  // Verify that all Objects referred to by code are NOT in new space.
+ Object* obj = *handle;
+ if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+ immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
+ immediate_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
+ }
+}
+
+
+bool Operand::NeedsRelocation() const {
+ if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ }
+
+ return !RelocInfo::IsNone(rmode_);
+}
+
+
+// Assembler
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
+ recorded_ast_id_(TypeFeedbackId::None()),
+ unresolved_branches_(),
+ positions_recorder_(this) {
+ const_pool_blocked_nesting_ = 0;
+ veneer_pool_blocked_nesting_ = 0;
+ Reset();
+}
+
+
+Assembler::~Assembler() {
+ ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+}
+
+
+void Assembler::Reset() {
+#ifdef DEBUG
+ ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+ ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+ ASSERT(unresolved_branches_.empty());
+ memset(buffer_, 0, pc_ - buffer_);
+#endif
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
+ reinterpret_cast<byte*>(pc_));
+ num_pending_reloc_info_ = 0;
+ next_constant_pool_check_ = 0;
+ next_veneer_pool_check_ = kMaxInt;
+ no_const_pool_before_ = 0;
+ first_const_pool_use_ = -1;
+ ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_reloc_info_ == 0);
+
+ // Set up code descriptor.
+ if (desc) {
+ desc->buffer = reinterpret_cast<byte*>(buffer_);
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos();
+ desc->origin = this;
+ }
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CheckLabelLinkChain(Label const * label) {
+#ifdef DEBUG
+ if (label->is_linked()) {
+ int linkoffset = label->pos();
+ bool end_of_chain = false;
+ while (!end_of_chain) {
+ Instruction * link = InstructionAt(linkoffset);
+ int linkpcoffset = link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + linkpcoffset;
+
+ end_of_chain = (linkoffset == prevlinkoffset);
+ linkoffset = linkoffset + linkpcoffset;
+ }
+ }
+#endif
+}
+
+
+void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer) {
+ ASSERT(label->is_linked());
+
+ CheckLabelLinkChain(label);
+
+ Instruction* link = InstructionAt(label->pos());
+ Instruction* prev_link = link;
+ Instruction* next_link;
+ bool end_of_chain = false;
+
+ while (link != branch && !end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ prev_link = link;
+ link = next_link;
+ }
+
+ ASSERT(branch == link);
+ next_link = branch->ImmPCOffsetTarget();
+
+ if (branch == prev_link) {
+ // The branch is the first instruction in the chain.
+ if (branch == next_link) {
+ // It is also the last instruction in the chain, so it is the only branch
+ // currently referring to this label.
+ label->Unuse();
+ } else {
+ label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ }
+
+ } else if (branch == next_link) {
+ // The branch is the last (but not also the first) instruction in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ } else {
+ // The branch is in the middle of the chain.
+ if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
+ prev_link->SetImmPCOffsetTarget(next_link);
+ } else if (label_veneer != NULL) {
+ // Use the veneer for all previous links in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ end_of_chain = false;
+ link = next_link;
+ while (!end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ link->SetImmPCOffsetTarget(label_veneer);
+ link = next_link;
+ }
+ } else {
+      // The CHECK below will fail.
+      // Some other work could be attempted to fix up the chain, but it would
+      // be rather complicated. If we crash here, we may want to consider
+      // using a mechanism other than a chain of branches.
+ //
+ // Note that this situation currently should not happen, as we always call
+ // this function with a veneer to the target label.
+ // However this could happen with a MacroAssembler in the following state:
+ // [previous code]
+ // B(label);
+ // [20KB code]
+ // Tbz(label); // First tbz. Pointing to unconditional branch.
+ // [20KB code]
+ // Tbz(label); // Second tbz. Pointing to the first tbz.
+ // [more code]
+ // and this function is called to remove the first tbz from the label link
+ // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
+ // the unconditional branch.
+ CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
+ UNREACHABLE();
+ }
+ }
+
+ CheckLabelLinkChain(label);
+}
+
+
+void Assembler::bind(Label* label) {
+ // Bind label to the address at pc_. All instructions (most likely branches)
+ // that are linked to this label will be updated to point to the newly-bound
+ // label.
+
+ ASSERT(!label->is_near_linked());
+ ASSERT(!label->is_bound());
+
+ // If the label is linked, the link chain looks something like this:
+ //
+ // |--I----I-------I-------L
+ // |---------------------->| pc_offset
+ // |-------------->| linkoffset = label->pos()
+ // |<------| link->ImmPCOffset()
+ // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
+ //
+ // On each iteration, the last link is updated and then removed from the
+ // chain until only one remains. At that point, the label is bound.
+ //
+ // If the label is not linked, no preparation is required before binding.
+ while (label->is_linked()) {
+ int linkoffset = label->pos();
+ Instruction* link = InstructionAt(linkoffset);
+ int prevlinkoffset = linkoffset + link->ImmPCOffset();
+
+ CheckLabelLinkChain(label);
+
+ ASSERT(linkoffset >= 0);
+ ASSERT(linkoffset < pc_offset());
+ ASSERT((linkoffset > prevlinkoffset) ||
+ (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
+ ASSERT(prevlinkoffset >= 0);
+
+ // Update the link to point to the label.
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+
+ // Link the label to the previous link in the chain.
+ if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
+ // We hit kStartOfLabelLinkChain, so the chain is fully processed.
+ label->Unuse();
+ } else {
+ // Update the label for the next iteration.
+ label->link_to(prevlinkoffset);
+ }
+ }
+ label->bind_to(pc_offset());
+
+ ASSERT(label->is_bound());
+ ASSERT(!label->is_linked());
+
+ DeleteUnresolvedBranchInfoForLabel(label);
+}
+
+
+int Assembler::LinkAndGetByteOffsetTo(Label* label) {
+ ASSERT(sizeof(*pc_) == 1);
+ CheckLabelLinkChain(label);
+
+ int offset;
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated. Referring
+ // instructions must link directly to the label as they will not be
+ // updated.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ //
+ // Note that offset can be zero for self-referential instructions. (This
+ // could be useful for ADR, for example.)
+ offset = label->pos() - pc_offset();
+ ASSERT(offset <= 0);
+ } else {
+ if (label->is_linked()) {
+ // The label is linked, so the referring instruction should be added onto
+ // the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ ASSERT(offset != kStartOfLabelLinkChain);
+ // Note that the offset here needs to be PC-relative only so that the
+ // first instruction in a buffer can link to an unbound label. Otherwise,
+ // the offset would be 0 for this case, and 0 is reserved for
+ // kStartOfLabelLinkChain.
+ } else {
+ // The label is unused, so it now becomes linked and the referring
+ // instruction is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+ }
+
+ return offset;
+}
+
+
+void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ // Branches to this label will be resolved when the label is bound below.
+ std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ it_tmp = it++;
+ if (it_tmp->second.label_ == label) {
+ CHECK(it_tmp->first >= pc_offset());
+ unresolved_branches_.erase(it_tmp);
+ }
+ }
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+    // Prevent constant pool checks from happening by setting the next check
+    // to the biggest possible offset.
+ next_constant_pool_check_ = kMaxInt;
+ }
+}
+
+
+void Assembler::EndBlockConstPool() {
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
+ // will trigger a check.
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+bool Assembler::is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+}
+
+
+bool Assembler::IsConstantPoolAt(Instruction* instr) {
+ // The constant pool marker is made of two instructions. These instructions
+ // will never be emitted by the JIT, so checking for the first one is enough:
+ // 0: ldr xzr, #<size of pool>
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+
+ // It is still worth asserting the marker is complete.
+ // 4: blr xzr
+ ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+ instr->following()->Rn() == xzr.code()));
+
+ return result;
+}
+
+
+int Assembler::ConstantPoolSizeAt(Instruction* instr) {
+#ifdef USE_SIMULATOR
+ // Assembler::debug() embeds constants directly into the instruction stream.
+ // Although this is not a genuine constant pool, treat it like one to avoid
+ // disassembling the constants.
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsDebug)) {
+ const char* message =
+ reinterpret_cast<const char*>(
+ instr->InstructionAtOffset(kDebugMessageOffset));
+ int size = kDebugMessageOffset + strlen(message) + 1;
+ return RoundUp(size, kInstructionSize) / kInstructionSize;
+ }
+ // Same for printf support, see MacroAssembler::CallPrintf().
+ if ((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kImmExceptionIsPrintf)) {
+ return kPrintfLength / kInstructionSize;
+ }
+#endif
+ if (IsConstantPoolAt(instr)) {
+ return instr->ImmLLiteral();
+ } else {
+ return -1;
+ }
+}
+
+
+void Assembler::ConstantPoolMarker(uint32_t size) {
+ ASSERT(is_const_pool_blocked());
+ // + 1 is for the crash guard.
+ Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
+}
+
+
+void Assembler::EmitPoolGuard() {
+ // We must generate only one instruction as this is used in scopes that
+ // control the size of the code generated.
+ Emit(BLR | Rn(xzr));
+}
+
+
+void Assembler::ConstantPoolGuard() {
+#ifdef DEBUG
+ // Currently this is only used after a constant pool marker.
+ ASSERT(is_const_pool_blocked());
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+ EmitPoolGuard();
+}
+
+
+void Assembler::StartBlockVeneerPool() {
+ ++veneer_pool_blocked_nesting_;
+}
+
+
+void Assembler::EndBlockVeneerPool() {
+ if (--veneer_pool_blocked_nesting_ == 0) {
+ // Check the veneer pool hasn't been blocked for too long.
+ ASSERT(unresolved_branches_.empty() ||
+ (pc_offset() < unresolved_branches_first_limit()));
+ }
+}
+
+
+void Assembler::br(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ // The pattern 'blr xzr' is used as a guard to detect when execution falls
+ // through the constant pool. It should not be emitted.
+ ASSERT(!xn.Is(xzr));
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::b(int imm26) {
+ Emit(B | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::b(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::b(int imm19, Condition cond) {
+ Emit(B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+void Assembler::b(Label* label, Condition cond) {
+ positions_recorder()->WriteRecordedPositions();
+ b(LinkAndGetInstructionOffsetTo(label), cond);
+}
+
+
+void Assembler::bl(int imm26) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ bl(LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ int imm19) {
+ positions_recorder()->WriteRecordedPositions();
+ Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ cbnz(rt, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ int imm14) {
+ positions_recorder()->WriteRecordedPositions();
+ ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+ Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt,
+ unsigned bit_pos,
+ Label* label) {
+ positions_recorder()->WriteRecordedPositions();
+ tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
+}
+
+
+void Assembler::adr(const Register& rd, int imm21) {
+ ASSERT(rd.Is64Bits());
+ Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ adr(rd, LinkAndGetByteOffsetTo(label));
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ANDS);
+}
+
+
+void Assembler::tst(const Register& rn,
+ const Operand& operand) {
+ ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) |
+ Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) |
+ ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
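+  // cset is an alias of csinc with both operands the zero register: rd is set
+  // to 1 if the condition holds and to 0 otherwise.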
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
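+  // csetm is an alias of csinv with both operands the zero register: rd is
+  // set to all ones if the condition holds and to 0 otherwise.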
+ ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
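+  // cinc: rd = rn + 1 if the condition holds, rd = rn otherwise.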
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
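+  // cinv: rd = ~rn if the condition holds, rd = rn otherwise.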
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
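+  // cneg: rd = -rn if the condition holds, rd = rn otherwise.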
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ Register zr = AppropriateZeroRegFor(rn);
+ DataProcessing3Source(rd, rn, rm, zr, MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.Is64Bits());
+ ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == rm.SizeInBits());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ ASSERT(!rt2.Is(addr.base()));
+ ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ ASSERT(!rt.Is(rt2));
+ ASSERT(AreSameSizeAndType(rt, rt2));
+ ASSERT(addr.IsImmediateOffset());
+
+ LSDataSize size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(addr.offset(), size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRB_w);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRB_w);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, LDRH_w);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst) {
+ LoadStore(rt, dst, STRH_w);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, LoadOpFor(rt));
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& src) {
+ LoadStore(rt, src, StoreOpFor(rt));
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
+ ASSERT(rt.Is64Bits());
+ LoadStore(rt, src, LDRSW_x);
+}
+
+
+void Assembler::ldr(const Register& rt, uint64_t imm) {
+  // TODO(all): The constant pool may be garbage collected, so we cannot store
+  // arbitrary values in it. Manually move the value for now. Fix
+  // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::ldr(const FPRegister& ft, double imm) {
+  // TODO(all): The constant pool may be garbage collected, so we cannot store
+  // arbitrary values in it. Manually move the value for now. Fix
+  // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::ldr(const FPRegister& ft, float imm) {
+  // TODO(all): The constant pool may be garbage collected, so we cannot store
+  // arbitrary values in it. Manually move the value for now. Fix
+  // MacroAssembler::Fmov when this is implemented.
+ UNIMPLEMENTED();
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+  // Moves involving the stack pointer are encoded as an add immediate with
+  // a zero second operand. Otherwise, an orr with the zero register as the
+  // first operand is used.
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::hint(SystemHint code) {
+ Emit(HINT | ImmHint(code) | Rt(xzr));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(FPRegister fd, double imm) {
+ ASSERT(fd.Is64Bits());
+ ASSERT(IsImmFP64(imm));
+ Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
+}
+
+
+void Assembler::fmov(FPRegister fd, float imm) {
+ ASSERT(fd.Is32Bits());
+ ASSERT(IsImmFP32(imm));
+ Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
+}
+
+
+void Assembler::fmov(Register rd, FPRegister fn) {
+ ASSERT(rd.SizeInBits() == fn.SizeInBits());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(fn));
+}
+
+
+void Assembler::fmov(FPRegister fd, Register rn) {
+ ASSERT(fd.SizeInBits() == rn.SizeInBits());
+ FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(fd) | Rn(rn));
+}
+
+
+void Assembler::fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
+}
+
+
+void Assembler::fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FADD);
+}
+
+
+void Assembler::fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FSUB);
+}
+
+
+void Assembler::fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMUL);
+}
+
+
+void Assembler::fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FDIV);
+}
+
+
+void Assembler::fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAX);
+}
+
+
+void Assembler::fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMAXNM);
+}
+
+
+void Assembler::fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMIN);
+}
+
+
+void Assembler::fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ FPDataProcessing2Source(fd, fn, fm, FMINNM);
+}
+
+
+void Assembler::fabs(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FABS);
+}
+
+
+void Assembler::fneg(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FNEG);
+}
+
+
+void Assembler::fsqrt(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FSQRT);
+}
+
+
+void Assembler::frinta(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTA);
+}
+
+
+void Assembler::frintn(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTN);
+}
+
+
+void Assembler::frintz(const FPRegister& fd,
+ const FPRegister& fn) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ FPDataProcessing1Source(fd, fn, FRINTZ);
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
+}
+
+
+void Assembler::fcmp(const FPRegister& fn,
+ double value) {
+ USE(value);
+ // Although the fcmp instruction can strictly only take an immediate value of
+ // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
+ // affect the result of the comparison.
+ ASSERT(value == 0.0);
+ Emit(FPType(fn) | FCMP_zero | Rn(fn));
+}
+
+
+void Assembler::fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(fn.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
+}
+
+
+void Assembler::fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op) {
+ Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
+}
+
+
+void Assembler::fcvt(const FPRegister& fd,
+ const FPRegister& fn) {
+ if (fd.Is64Bits()) {
+ // Convert float to double.
+ ASSERT(fn.Is32Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_ds);
+ } else {
+ // Convert double to float.
+ ASSERT(fn.Is64Bits());
+ FPDataProcessing1Source(fd, fn, FCVT_sd);
+ }
+}
+
+
+void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAU);
+}
+
+
+void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTAS);
+}
+
+
+void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMU);
+}
+
+
+void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTMS);
+}
+
+
+void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNU);
+}
+
+
+void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTNS);
+}
+
+
+void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZU);
+}
+
+
+void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
+ FPConvertToInt(rd, fn, FCVTZS);
+}
+
+
+void Assembler::scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+void Assembler::ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
+ } else {
+ Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(fd));
+ }
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+Instr Assembler::ImmFP32(float imm) {
+ ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = float_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
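+  // For example, 1.0f has raw bits 0x3f800000 and encodes as the 8-bit
+  // immediate 0x70.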
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+
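+  // For example, -2.0 has raw bits 0xc000000000000000 and encodes as the
+  // 8-bit immediate 0x80.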
+ return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+}
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ if (shift >= 0) {
+ // Explicit shift specified.
+ ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+ ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & ~0xffffUL) == 0) {
+ // Nothing to do.
+ } else if ((imm & ~(0xffffUL << 16)) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & ~(0xffffUL << 32)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & ~(0xffffUL << 48)) == 0) {
+ ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
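+  // For example, with no explicit shift, 0x123400000000 is encoded as
+  // imm = 0x1234 with shift = 2 (a left shift by 32 bits).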
+
+ ASSERT(is_uint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ ASSERT(is_uint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ ASSERT(is_uint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::debug(const char* message, uint32_t code, Instr params) {
+#ifdef USE_SIMULATOR
+  // Don't generate simulator-specific code if we are building a snapshot, which
+ // might be run on real hardware.
+ if (!Serializer::enabled()) {
+#ifdef DEBUG
+ Serializer::TooLateToEnableNow();
+#endif
+ // The arguments to the debug marker need to be contiguous in memory, so
+ // make sure we don't try to emit pools.
+ BlockPoolsScope scope(this);
+
+ Label start;
+ bind(&start);
+
+ // Refer to instructions-arm64.h for a description of the marker and its
+ // arguments.
+ hlt(kImmExceptionIsDebug);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+ dc32(code);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+ dc32(params);
+ ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+ EmitStringData(message);
+ hlt(kImmExceptionIsUnreachable);
+
+ return;
+ }
+ // Fall through if Serializer is enabled.
+#endif
+
+ if (params & BREAK) {
+ hlt(kImmExceptionIsDebug);
+ }
+}
+
+
+void Assembler::Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+
+ ASSERT(immediate != 0);
+ ASSERT(immediate != -1);
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+void Assembler::LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op) {
+ unsigned reg_size = rd.SizeInBits();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
+ Rn(rn));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ ASSERT(!operand.NeedsRelocation());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ } else {
+ ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op) {
+ Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op) {
+ ASSERT(fd.SizeInBits() == fn.SizeInBits());
+ ASSERT(fd.SizeInBits() == fm.SizeInBits());
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op) {
+ ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+ Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+ unsigned reg_size = rd.SizeInBits();
+ // Use the correct size of register.
+ Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
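+  // (7 for byte extends, 15 for halfword, 31 for word and 63 for UXTX/SXTX.)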
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ ASSERT(rn.SizeInBits() == kXRegSizeInBits);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
+
+
+void Assembler::DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(operand.IsShiftedRegister());
+ ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+ ASSERT(!operand.NeedsRelocation());
+ Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ ASSERT(!operand.NeedsRelocation());
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
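+  // Either an unsigned 12-bit value, or such a value shifted left by 12 bits
+  // (for example 0xabc or 0xabc000, but not 0xabc123).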
+ return is_uint12(immediate) ||
+ (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ Instr memop = op | Rt(rt) | RnSP(addr.base());
+ ptrdiff_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ LSDataSize size = CalcLSDataSize(op);
+ if (IsImmLSScaled(offset, size)) {
+ // Use the scaled addressing mode.
+ Emit(LoadStoreUnsignedOffsetFixed | memop |
+ ImmLSUnsigned(offset >> size));
+ } else if (IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ } else if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ ASSERT((shift_amount == 0) ||
+ (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
+ Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
+ } else {
+ // Pre-index and post-index modes.
+ ASSERT(!rt.Is(addr.base()));
+ if (IsImmLSUnscaled(offset)) {
+ if (addr.IsPreIndex()) {
+ Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
+ } else {
+ ASSERT(addr.IsPostIndex());
+ Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
+ }
+ } else {
+ // This case is handled in the macro assembler.
+ UNREACHABLE();
+ }
+ }
+}
+
+
+bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+ return is_int9(offset);
+}
+
+
+bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
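+  // The offset must be a multiple of the access size and fit in 12 bits once
+  // scaled, e.g. a multiple of 8 no larger than 32760 for an X register.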
+ bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+ return offset_is_size_multiple && is_uint12(offset >> size);
+}
+
+
+void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
+ ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.Is(xzr));
+ Emit(LDR_x_lit |
+ ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
+ Rt(rt));
+}
+
+
+void Assembler::LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op) {
+ int64_t imm = operand.immediate();
+ ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
+ RecordRelocInfo(operand.rmode(), imm);
+ BlockConstPoolFor(1);
+ Emit(op | ImmLLiteral(0) | Rt(rt));
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it cannot be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+ ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
+  // To test if an arbitrary immediate can be encoded using this scheme, an
+ // iterative algorithm is used.
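+  //
+  // For example, 0x00ff00ff00ff00ff (a repeating 16-bit element with its low
+  // eight bits set and no rotation) is encodable as N = 0, imm_s = 0b100111
+  // and imm_r = 0b000000.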
+ //
+ // TODO(mcapewel) This code does not consider using X/W register overlap to
+ // support 64-bit immediates where the top 32-bits are zero, and the bottom
+ // 32-bits are an encodable logical immediate.
+
+ // 1. If the value has all set or all clear bits, it can't be encoded.
+ if ((value == 0) || (value == 0xffffffffffffffffUL) ||
+ ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
+ return false;
+ }
+
+ unsigned lead_zero = CountLeadingZeros(value, width);
+ unsigned lead_one = CountLeadingZeros(~value, width);
+ unsigned trail_zero = CountTrailingZeros(value, width);
+ unsigned trail_one = CountTrailingZeros(~value, width);
+ unsigned set_bits = CountSetBits(value, width);
+
+ // The fixed bits in the immediate s field.
+ // If width == 64 (X reg), start at 0xFFFFFF80.
+ // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+ // widths won't be executed.
+ int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
+ int imm_s_mask = 0x3F;
+
+ for (;;) {
+ // 2. If the value is two bits wide, it can be encoded.
+ if (width == 2) {
+ *n = 0;
+ *imm_s = 0x3C;
+ *imm_r = (value & 3) - 1;
+ return true;
+ }
+
+ *n = (width == 64) ? 1 : 0;
+ *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+ if ((lead_zero + set_bits) == width) {
+ *imm_r = 0;
+ } else {
+ *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+ }
+
+ // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+ // the bit width of the value, it can be encoded.
+ if (lead_zero + trail_zero + set_bits == width) {
+ return true;
+ }
+
+ // 4. If the sum of leading ones, trailing ones and unset bits in the
+ // value is equal to the bit width of the value, it can be encoded.
+ if (lead_one + trail_one + (width - set_bits) == width) {
+ return true;
+ }
+
+ // 5. If the most-significant half of the bitwise value is equal to the
+ // least-significant half, return to step 2 using the least-significant
+ // half of the value.
+ uint64_t mask = (1UL << (width >> 1)) - 1;
+ if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+ width >>= 1;
+ set_bits >>= 1;
+ imm_s_fixed >>= 1;
+ continue;
+ }
+
+ // 6. Otherwise, the value can't be encoded.
+ return false;
+ }
+}
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return is_uint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
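+  // For example, 1.0f (0x3f800000) has this form and passes the checks below,
+  // while 0.0f fails the last check because bits 30 and 29 are equal.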
+ uint32_t bits = float_to_rawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = double_to_rawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0xffffffffffffL) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if (b_pattern != 0 && b_pattern != 0x3fc0) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // Compute new buffer size.
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4 * KB) {
+ desc.buffer_size = 4 * KB;
+ } else if (buffer_size_ < 1 * MB) {
+ desc.buffer_size = 2 * buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1 * MB;
+ }
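+  // For example, a 16 KB buffer grows to 32 KB, while a 3 MB buffer grows to
+  // 4 MB; the buffer never shrinks.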
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ byte* buffer = reinterpret_cast<byte*>(buffer_);
+
+ // Set up new buffer.
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+
+ // Copy the data.
+ intptr_t pc_delta = desc.buffer - buffer;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer + buffer_size_);
+ memmove(desc.buffer, buffer, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // Switch buffers.
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types are pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+  // need to relocate any emitted relocation entries.
+
+ // Relocate pending relocation entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::JS_RETURN) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::CONST_POOL) ||
+ (rmode == RelocInfo::VENEER_POOL)) {
+ // Adjust code for new modes.
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsConstPool(rmode)
+ || RelocInfo::IsVeneerPool(rmode));
+ // These modes do not need an entry in the constant pool.
+ } else {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ }
+
+ if (!RelocInfo::IsNone(rmode)) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !emit_debug_code()) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(
+ reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ ClearRecordedAstId();
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
+ }
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstructionSize;
+ if (no_const_pool_before_ < pc_limit) {
+ // If there are some pending entries, the constant pool cannot be blocked
+ // further than first_const_pool_use_ + kMaxDistToConstPool
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
+ no_const_pool_before_ = pc_limit;
+ }
+
+ if (next_constant_pool_check_ < no_const_pool_before_) {
+ next_constant_pool_check_ = no_const_pool_before_;
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by calls to BlockConstPoolFor
+  // and BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ ASSERT(!force_emit);
+ return;
+ }
+
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToConstPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+  //   the constant pool is at least kMaxDistToConstPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToConstPool &&
+ (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size_pool_marker = kInstructionSize;
+ int size_pool_guard = kInstructionSize;
+ int pool_size = jump_instr + size_pool_marker + size_pool_guard +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = pool_size + kGap;
+
+ // Emit veneers for branches that would go out of range during emission of the
+ // constant pool.
+ CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
+
+ Label size_check;
+ bind(&size_check);
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool (this includes the jump over the pool, the constant pool marker, the
+  // constant pool guard, and the gap to the relocation information).
+ while (buffer_space() <= needed_space) {
+ GrowBuffer();
+ }
+
+ {
+ // Block recursive calls to CheckConstPool and protect from veneer pools.
+ BlockPoolsScope block_pools(this);
+ RecordComment("[ Constant Pool");
+ RecordConstPool(pool_size);
+
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
+
+ // Emit a constant pool header. The header has two goals:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // The header is therefore made of two arm64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // If executed the code will likely segfault and lr will point to the
+ // beginning of the constant pool.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
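+    //
+    // For example, with two pending entries and require_jump set, the pool
+    // emitted below is laid out roughly as:
+    //   b after_pool            ; branch over the pool
+    //   ldr xzr, #4             ; marker: 2 entries * 2 words each
+    //   blr xzr                 ; guard
+    //   <64-bit entry 0>
+    //   <64-bit entry 1>
+    //  after_pool: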
+ ConstantPoolMarker(2 * num_pending_reloc_info_);
+ ConstantPoolGuard();
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::VENEER_POOL);
+
+ Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() &&
+ instr->ImmLLiteral() == 0);
+
+ instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ dc64(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(pool_size));
+}
+
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void Assembler::RecordVeneerPool(int location_offset, int size) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RelocInfo rinfo(buffer_ + location_offset,
+ RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
+ NULL);
+ reloc_info_writer.Write(&rinfo);
+#endif
+}
+
+
+void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
+ BlockPoolsScope scope(this);
+ RecordComment("[ Veneers");
+
+ // The exact size of the veneer pool must be recorded (see the comment at the
+ // declaration site of RecordConstPool()), but computing the number of
+ // veneers that will be generated is not obvious. So instead we remember the
+ // current position and will record the size after the pool has been
+ // generated.
+ Label size_check;
+ bind(&size_check);
+ int veneer_pool_relocinfo_loc = pc_offset();
+
+ Label end;
+ if (need_protection) {
+ b(&end);
+ }
+
+ EmitVeneersGuard();
+
+ Label veneer_size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (force_emit || ShouldEmitVeneer(it->first, margin)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ bind(&veneer_size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
+ static_cast<uint64_t>(kMaxVeneerCodeSize));
+ veneer_size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+
+ // Record the veneer pool size.
+ int pool_size = SizeOfCodeGeneratedSince(&size_check);
+ RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
+
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+
+ bind(&end);
+
+ RecordComment("]");
+}
+
+
+void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin) {
+ // There is nothing to do if there are no pending veneer pool entries.
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ ASSERT(pc_offset() < unresolved_branches_first_limit());
+
+  // Some short sequences of instructions must not be broken up by veneer pool
+  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
+  // BlockVeneerPoolScope.
+ if (is_veneer_pool_blocked()) {
+ ASSERT(!force_emit);
+ return;
+ }
+
+ if (!require_jump) {
+ // Prefer emitting veneers protected by an existing instruction.
+ margin *= kVeneerNoProtectionFactor;
+ }
+ if (force_emit || ShouldEmitVeneers(margin)) {
+ EmitVeneers(force_emit, require_jump, margin);
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_code_comments) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+int Assembler::buffer_space() const {
+ return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
+}
+
+
+void Assembler::RecordJSReturn() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordConstPool(int size) {
+ // We only need this for debugger support, to correctly compute offsets in the
+ // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
new file mode 100644
index 000000000..1aae2f291
--- /dev/null
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -0,0 +1,2233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
+#define V8_ARM64_ASSEMBLER_ARM64_H_
+
+#include <list>
+#include <map>
+
+#include "globals.h"
+#include "utils.h"
+#include "assembler.h"
+#include "serialize.h"
+#include "arm64/instructions-arm64.h"
+#include "arm64/cpu-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+
+static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+
+
+// Some CPURegister methods can return Register and FPRegister types, so we
+// need to declare them in advance.
+struct Register;
+struct FPRegister;
+
+
+struct CPURegister {
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kFPRegister,
+ kNoRegister
+ };
+
+ static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ CPURegister r = {code, size, type};
+ return r;
+ }
+
+ unsigned code() const;
+ RegisterType type() const;
+ RegList Bit() const;
+ unsigned SizeInBits() const;
+ int SizeInBytes() const;
+ bool Is32Bits() const;
+ bool Is64Bits() const;
+ bool IsValid() const;
+ bool IsValidOrNone() const;
+ bool IsValidRegister() const;
+ bool IsValidFPRegister() const;
+ bool IsNone() const;
+ bool Is(const CPURegister& other) const;
+
+ bool IsZero() const;
+ bool IsSP() const;
+
+ bool IsRegister() const;
+ bool IsFPRegister() const;
+
+ Register X() const;
+ Register W() const;
+ FPRegister D() const;
+ FPRegister S() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const;
+
+ // V8 compatibility.
+ bool is(const CPURegister& other) const { return Is(other); }
+ bool is_valid() const { return IsValid(); }
+
+ unsigned reg_code;
+ unsigned reg_size;
+ RegisterType reg_type;
+};
+
+
+struct Register : public CPURegister {
+ static Register Create(unsigned code, unsigned size) {
+ return Register(CPURegister::Create(code, size, CPURegister::kRegister));
+ }
+
+ Register() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit Register(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ Register(const Register& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ static Register XRegFromCode(unsigned code);
+ static Register WRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+  // These members are necessary for compilation.
+ // A few of them may be unused for now.
+
+ static const int kNumRegisters = kNumberOfRegisters;
+ static int NumRegisters() { return kNumRegisters; }
+
+ // We allow crankshaft to use the following registers:
+ // - x0 to x15
+ // - x18 to x24
+ // - x27 (also context)
+ //
+ // TODO(all): Register x25 is currently free and could be available for
+ // crankshaft, but we don't use it as we might use it as a per function
+ // literal pool pointer in the future.
+ //
+ // TODO(all): Consider storing cp in x25 to have only two ranges.
+ // We split allocatable registers in three ranges called
+ // - "low range"
+ // - "high range"
+ // - "context"
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 15;
+ static const unsigned kAllocatableHighRangeBegin = 18;
+ static const unsigned kAllocatableHighRangeEnd = 24;
+ static const unsigned kAllocatableContext = 27;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return ((reg_code == kAllocatableContext) ||
+ (reg_code <= kAllocatableLowRangeEnd) ||
+ ((reg_code >= kAllocatableHighRangeBegin) &&
+ (reg_code <= kAllocatableHighRangeEnd)));
+ }
+
+ static Register FromAllocationIndex(unsigned index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ // cp is the last allocatable register.
+ if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
+ return from_code(kAllocatableContext);
+ }
+
+ // Handle low and high ranges.
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 15) &&
+ (kAllocatableHighRangeBegin == 18) &&
+ (kAllocatableHighRangeEnd == 24) &&
+ (kAllocatableContext == 27));
+ const char* const names[] = {
+ "x0", "x1", "x2", "x3", "x4",
+ "x5", "x6", "x7", "x8", "x9",
+ "x10", "x11", "x12", "x13", "x14",
+ "x15", "x18", "x19", "x20", "x21",
+ "x22", "x23", "x24", "x27",
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(Register reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+ if (code == kAllocatableContext) {
+ return NumAllocatableRegisters() - 1;
+ }
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
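+
+  // The mapping used above is dense: allocation indices 0-15 map to x0-x15,
+  // indices 16-22 map to x18-x24, and the last index (23) maps to the context
+  // register x27. For example, FromAllocationIndex(16) returns x18 and
+  // ToAllocationIndex(cp) returns 23.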
+
+ static Register from_code(int code) {
+ // Always return an X register.
+ return Register::Create(code, kXRegSizeInBits);
+ }
+
+ // End of V8 compatibility section -----------------------
+};
+
+
+struct FPRegister : public CPURegister {
+ static FPRegister Create(unsigned code, unsigned size) {
+ return FPRegister(
+ CPURegister::Create(code, size, CPURegister::kFPRegister));
+ }
+
+ FPRegister() {
+ reg_code = 0;
+ reg_size = 0;
+ reg_type = CPURegister::kNoRegister;
+ }
+
+ explicit FPRegister(const CPURegister& r) {
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
+ reg_code = r.reg_code;
+ reg_size = r.reg_size;
+ reg_type = r.reg_type;
+ ASSERT(IsValidOrNone());
+ }
+
+ bool IsValid() const {
+ ASSERT(IsFPRegister() || IsNone());
+ return IsValidFPRegister();
+ }
+
+ static FPRegister SRegFromCode(unsigned code);
+ static FPRegister DRegFromCode(unsigned code);
+
+ // Start of V8 compatibility section ---------------------
+ static const int kMaxNumRegisters = kNumberOfFPRegisters;
+
+ // Crankshaft can use all the FP registers except:
+ // - d15 which is used to keep the 0 double value
+ // - d30 which is used in crankshaft as a double scratch register
+ // - d31 which is used in the MacroAssembler as a double scratch register
+ static const unsigned kAllocatableLowRangeBegin = 0;
+ static const unsigned kAllocatableLowRangeEnd = 14;
+ static const unsigned kAllocatableHighRangeBegin = 16;
+ static const unsigned kAllocatableHighRangeEnd = 29;
+
+ static const RegList kAllocatableFPRegisters = 0x3fff7fff;
+
+ // Gap between low and high ranges.
+ static const int kAllocatableRangeGapSize =
+ (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
+
+ static const int kMaxNumAllocatableRegisters =
+ (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
+ (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
+ static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+ // Return true if the register is one that crankshaft can allocate.
+ bool IsAllocatable() const {
+ return (Bit() & kAllocatableFPRegisters) != 0;
+ }
+
+ static FPRegister FromAllocationIndex(unsigned int index) {
+ ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+
+ return (index <= kAllocatableLowRangeEnd)
+ ? from_code(index)
+ : from_code(index + kAllocatableRangeGapSize);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
+ ASSERT((kAllocatableLowRangeBegin == 0) &&
+ (kAllocatableLowRangeEnd == 14) &&
+ (kAllocatableHighRangeBegin == 16) &&
+ (kAllocatableHighRangeEnd == 29));
+ const char* const names[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29"
+ };
+ return names[index];
+ }
+
+ static int ToAllocationIndex(FPRegister reg) {
+ ASSERT(reg.IsAllocatable());
+ unsigned code = reg.code();
+
+ return (code <= kAllocatableLowRangeEnd)
+ ? code
+ : code - kAllocatableRangeGapSize;
+ }
+
+ static FPRegister from_code(int code) {
+ // Always return a D register.
+ return FPRegister::Create(code, kDRegSizeInBits);
+ }
+ // End of V8 compatibility section -----------------------
+};
+
+
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
+STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
+
+
+#if defined(ARM64_DEFINE_REG_STATICS)
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ const CPURegister init_##register_class##_##name = {code, size, type}; \
+ const register_class& name = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#define ALIAS_REGISTER(register_class, alias, name) \
+ const register_class& alias = *reinterpret_cast<const register_class*>( \
+ &init_##register_class##_##name)
+#else
+#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
+ extern const register_class& name
+#define ALIAS_REGISTER(register_class, alias, name) \
+ extern const register_class& alias
+#endif // defined(ARM64_DEFINE_REG_STATICS)
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and FPRegister
+// variants are provided for convenience.
+INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+
+// v8 compatibility.
+INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+
+#define DEFINE_REGISTERS(N) \
+ INITIALIZE_REGISTER(Register, w##N, N, \
+ kWRegSizeInBits, CPURegister::kRegister); \
+ INITIALIZE_REGISTER(Register, x##N, N, \
+ kXRegSizeInBits, CPURegister::kRegister);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+
+INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
+ CPURegister::kRegister);
+INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
+ CPURegister::kRegister);
+
+#define DEFINE_FPREGISTERS(N) \
+ INITIALIZE_REGISTER(FPRegister, s##N, N, \
+ kSRegSizeInBits, CPURegister::kFPRegister); \
+ INITIALIZE_REGISTER(FPRegister, d##N, N, \
+ kDRegSizeInBits, CPURegister::kFPRegister);
+REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+#undef DEFINE_FPREGISTERS
+
+#undef INITIALIZE_REGISTER
+
+// Register aliases.
+ALIAS_REGISTER(Register, ip0, x16);
+ALIAS_REGISTER(Register, ip1, x17);
+ALIAS_REGISTER(Register, wip0, w16);
+ALIAS_REGISTER(Register, wip1, w17);
+// Root register.
+ALIAS_REGISTER(Register, root, x26);
+ALIAS_REGISTER(Register, rr, x26);
+// Context pointer register.
+ALIAS_REGISTER(Register, cp, x27);
+// We use a register as a JS stack pointer to overcome the restriction on the
+// architectural SP alignment.
+// We chose x28 because it is contiguous with the other specific purpose
+// registers.
+STATIC_ASSERT(kJSSPCode == 28);
+ALIAS_REGISTER(Register, jssp, x28);
+ALIAS_REGISTER(Register, wjssp, w28);
+ALIAS_REGISTER(Register, fp, x29);
+ALIAS_REGISTER(Register, lr, x30);
+ALIAS_REGISTER(Register, xzr, x31);
+ALIAS_REGISTER(Register, wzr, w31);
+
+// Keeps the 0 double value.
+ALIAS_REGISTER(FPRegister, fp_zero, d15);
+// Crankshaft double scratch register.
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
+// MacroAssembler double scratch register.
+ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+
+#undef ALIAS_REGISTER
+
+
+Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = NoReg,
+ Register reg3 = NoReg,
+ Register reg4 = NoReg);
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments set
+// to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+typedef FPRegister DoubleRegister;
+
+
+// -----------------------------------------------------------------------------
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.SizeInBits()), type_(reg1.type()) {
+ ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kFPRegister) &&
+ (last_reg < kNumberOfFPRegisters)));
+ ASSERT(last_reg >= first_reg);
+ list_ = (1UL << (last_reg + 1)) - 1;
+ list_ &= ~((1UL << first_reg) - 1);
+ ASSERT(IsValid());
+ }
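+  // For example, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 19, 28)
+  // describes the X registers x19 up to x28.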
+
+ CPURegister::RegisterType type() const {
+ ASSERT(IsValid());
+ return type_;
+ }
+
+ RegList list() const {
+ ASSERT(IsValid());
+ return list_;
+ }
+
+ inline void set_list(RegList new_list) {
+ ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other);
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other);
+
+ // Variants of Combine and Remove which take CPURegisters.
+ void Combine(const CPURegister& other);
+ void Remove(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg);
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code);
+ void Remove(int code);
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
+
+ // Registers saved as safepoints.
+ static CPURegList GetSafepointSavedRegisters();
+
+ bool IsEmpty() const {
+ ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other1,
+ const CPURegister& other2 = NoCPUReg,
+ const CPURegister& other3 = NoCPUReg,
+ const CPURegister& other4 = NoCPUReg) const {
+ ASSERT(IsValid());
+ RegList list = 0;
+ if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
+ if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
+ if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
+ if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
+ return (list_ & list) != 0;
+ }
+
+ int Count() const {
+ ASSERT(IsValid());
+ return CountSetBits(list_, kRegListSizeInBits);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ ASSERT((size_in_bits % kBitsPerByte) == 0);
+ return size_in_bits / kBitsPerByte;
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const {
+ const RegList kValidRegisters = 0x8000000ffffffff;
+ const RegList kValidFPRegisters = 0x0000000ffffffff;
+ switch (type_) {
+ case CPURegister::kRegister:
+ return (list_ & kValidRegisters) == list_;
+ case CPURegister::kFPRegister:
+ return (list_ & kValidFPRegisters) == list_;
+ case CPURegister::kNoRegister:
+ return list_ == 0;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+};
+
+
+// AAPCS64 callee-saved registers.
+#define kCalleeSaved CPURegList::GetCalleeSaved()
+#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+#define kCallerSaved CPURegList::GetCallerSaved()
+#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+
+
+// -----------------------------------------------------------------------------
+// Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+
+// Represents an operand in a machine instruction.
+class Operand {
+  // TODO(all): If necessary, study in more detail which methods
+  // TODO(all): should be inlined or not.
+ public:
+ // rm, {<shift> {#<shift_amount>}}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ inline Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, <extend> {#<shift_amount>}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ inline Operand(Register reg,
+ Extend extend,
+ unsigned shift_amount = 0);
+
+ template<typename T>
+ inline explicit Operand(Handle<T> handle);
+
+ // Implicit constructor for all int types, ExternalReference, and Smi.
+ template<typename T>
+ inline Operand(T t); // NOLINT(runtime/explicit)
+
+ // Implicit constructor for int types.
+ template<typename int_t>
+ inline Operand(int_t t, RelocInfo::Mode rmode);
+
+ inline bool IsImmediate() const;
+ inline bool IsShiftedRegister() const;
+ inline bool IsExtendedRegister() const;
+ inline bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ inline Operand ToExtendedRegister() const;
+
+ inline int64_t immediate() const;
+ inline Register reg() const;
+ inline Shift shift() const;
+ inline Extend extend() const;
+ inline unsigned shift_amount() const;
+
+ // Relocation information.
+ RelocInfo::Mode rmode() const { return rmode_; }
+ void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
+ bool NeedsRelocation() const;
+
+ // Helpers
+ inline static Operand UntagSmi(Register smi);
+ inline static Operand UntagSmiAndScale(Register smi, int scale);
+
+ private:
+ void initialize_handle(Handle<Object> value);
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+ RelocInfo::Mode rmode_;
+};
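+
+// Typical operands, as a rough illustration:
+//   Operand(x1)            // plain register
+//   Operand(x1, LSL, 4)    // shifted register: x1 << 4
+//   Operand(x1, SXTW)      // sign-extended register
+//   Operand(42)            // immediate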
+
+
+// MemOperand represents a memory operand in a load or store instruction.
+class MemOperand {
+ public:
+ inline explicit MemOperand(Register base,
+ ptrdiff_t offset = 0,
+ AddrMode addrmode = Offset);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ ptrdiff_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ inline bool IsImmediateOffset() const;
+ inline bool IsRegisterOffset() const;
+ inline bool IsPreIndex() const;
+ inline bool IsPostIndex() const;
+
+ // For offset modes, return the offset as an Operand. This helper cannot
+ // handle indexed modes.
+ inline Operand OffsetAsOperand() const;
+
+ private:
+ Register base_;
+ Register regoffset_;
+ ptrdiff_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
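+
+// Typical memory operands, as a rough illustration:
+//   MemOperand(x0, 8)               // [x0, #8]          immediate offset
+//   MemOperand(x0, x1, LSL, 3)      // [x0, x1, lsl #3]  register offset
+//   MemOperand(x0, 16, PreIndex)    // [x0, #16]!        pre-index
+//   MemOperand(x0, 16, PostIndex)   // [x0], #16         post-index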
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+class Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
+
+ virtual ~Assembler();
+
+ virtual void AbortedCodeGeneration() {
+ num_pending_reloc_info_ = 0;
+ }
+
+ // System functions ---------------------------------------------------------
+ // Start generating code from the beginning of the buffer, discarding any code
+ // and data that has already been emitted into the buffer.
+ //
+ // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // constant pool is not blocked.
+ void Reset();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ //
+ // The descriptor (desc) can be NULL. In that case, the code is finalized as
+ // usual, but the descriptor is not populated.
+ void GetCode(CodeDesc* desc);
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ inline void Unreachable();
+
+ // Label --------------------------------------------------------------------
+ // Bind a label to the current pc. Note that labels can only be bound once,
+ // and if labels are linked to other instructions, they _must_ be bound
+ // before they go out of scope.
+ void bind(Label* label);
+
+
+ // RelocInfo and pools ------------------------------------------------------
+
+ // Record relocation information for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ inline static Address target_pointer_address_at(Address pc);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code);
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target);
+
+ // Return the code target address at a call site from the return address of
+ // that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
+ // Given the address of the beginning of a call, return the address in the
+ // instruction stream that call will return from.
+ inline static Address return_address_from_call_start(Address pc);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Code* code, Address target);
+
+ // All addresses in the constant pool are the same size as pointers.
+ static const int kSpecialTargetSize = kPointerSize;
+
+ // The sizes of the call sequences emitted by MacroAssembler::Call.
+ // Wherever possible, use MacroAssembler::CallSize instead of these constants,
+ // as it will choose the correct value for a given relocation mode.
+ //
+ // Without relocation:
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
+ //
+ // With relocation:
+ // ldr temp, =target
+ // blr temp
+ static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
+ static const int kCallSizeWithRelocation = 2 * kInstructionSize;
+
+ // Size of the generated code in bytes
+ uint64_t SizeOfGeneratedCode() const {
+ ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ return pc_ - buffer_;
+ }
+
+ // Return the code size generated from label to the current position.
+ uint64_t SizeOfCodeGeneratedSince(const Label* label) {
+ ASSERT(label->is_bound());
+ ASSERT(pc_offset() >= label->pos());
+ ASSERT(pc_offset() < buffer_size_);
+ return pc_offset() - label->pos();
+ }
+
+ // Check the size of the code generated since the given label. This function
+ // is used primarily to work around comparisons between signed and unsigned
+ // quantities, since V8 uses both.
+ // TODO(jbramley): Work out what sign to use for these things and if possible,
+ // change things to be consistent.
+ void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
+ ASSERT(size >= 0);
+ ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ }
+
+ // Return the number of instructions generated from label to the
+ // current position.
+ int InstructionsGeneratedSince(const Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ }
+
+ // Number of instructions generated for the return sequence in
+ // FullCodeGenerator::EmitReturnSequence.
+ static const int kJSRetSequenceInstructions = 7;
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = 0;
+ static const int kPatchDebugBreakSlotAddressOffset = 0;
+
+ // Number of instructions necessary to be able to later patch it to a call.
+ // See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstructionSize;
+
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
+
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool();
+
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
+ void EndBlockConstPool();
+
+ bool is_const_pool_blocked() const;
+ static bool IsConstantPoolAt(Instruction* instr);
+ static int ConstantPoolSizeAt(Instruction* instr);
+ // See Assembler::CheckConstPool for more info.
+ void ConstantPoolMarker(uint32_t size);
+ void EmitPoolGuard();
+ void ConstantPoolGuard();
+
+ // Prevent veneer pool emission until EndBlockVeneerPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockVeneerPool.
+ void StartBlockVeneerPool();
+
+  // Resume veneer pool emission. Needs to be called as many times as
+  // StartBlockVeneerPool to have an effect.
+ void EndBlockVeneerPool();
+
+ bool is_veneer_pool_blocked() const {
+ return veneer_pool_blocked_nesting_ > 0;
+ }
+
+ // Block/resume emission of constant pools and veneer pools.
+ void StartBlockPools() {
+ StartBlockConstPool();
+ StartBlockVeneerPool();
+ }
+ void EndBlockPools() {
+ EndBlockConstPool();
+ EndBlockVeneerPool();
+ }
+
+ // Debugging ----------------------------------------------------------------
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ void RecordComment(const char* msg);
+ int buffer_space() const;
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the emission of a constant pool.
+ //
+ // The emission of constant and veneer pools depends on the size of the code
+ // generated and the number of RelocInfo recorded.
+ // The Debug mechanism needs to map code offsets between two versions of a
+ // function, compiled with and without debugger support (see for example
+ // Debug::PrepareForBreakPoints()).
+ // Compiling functions with debugger support generates additional code
+ // (Debug::GenerateSlot()). This may affect the emission of the pools and
+ // cause the version of the code with debugger support to have pools generated
+ // in different places.
+  // Recording the position and size of emitted pools makes it possible to
+  // correctly compute the offset mappings between the different versions of a
+  // function in all situations.
+ //
+ // The parameter indicates the size of the pool (in bytes), including
+ // the marker and branch over the data.
+ void RecordConstPool(int size);
+
+
+ // Instruction set functions ------------------------------------------------
+
+ // Branch / Jump instructions.
+  // For branches, offsets are scaled, i.e. they are in instructions, not in
+  // bytes.
+ // Branch to register.
+ void br(const Register& xn);
+
+ // Branch-link to register.
+ void blr(const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ void b(Label* label);
+
+ // Conditional branch to label.
+ void b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ void b(int imm26);
+
+ // Conditional branch to PC offset.
+ void b(int imm19, Condition cond);
+
+ // Branch-link to label / pc offset.
+ void bl(Label* label);
+ void bl(int imm26);
+
+ // Compare and branch to label / pc offset if zero.
+ void cbz(const Register& rt, Label* label);
+ void cbz(const Register& rt, int imm19);
+
+ // Compare and branch to label / pc offset if not zero.
+ void cbnz(const Register& rt, Label* label);
+ void cbnz(const Register& rt, int imm19);
+
+ // Test bit and branch to label / pc offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label / pc offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+ // Calculate a PC-relative address. Unlike for branches the offset in adr is
+ // unscaled (i.e. the result can be unaligned).
+ void adr(const Register& rd, Label* label);
+ void adr(const Register& rd, int imm21);
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ void ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test, and set flags.
+ void tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or-not (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise enor/xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ sbfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Signed bitfield insert in zero.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
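+  // For example, lsl(x0, x1, 4) on 64-bit registers emits
+  // ubfm(x0, x1, 60, 59), i.e. x0 = x1 << 4.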
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ ASSERT(shift < rd.SizeInBits());
+ ubfm(rd, rn, shift, rd.SizeInBits() - 1);
+ }
+
+ // Unsigned bitfield insert in zero.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(width >= 1);
+ ASSERT(lsb + width <= rn.SizeInBits());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set minus: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Extr aliases.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Multiplication.
+ // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Xd = bits<127:64> of Xn * Xm.
+ void smulh(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed 32 x 32 -> 64-bit multiply and accumulate.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed 32 x 32 -> 64-bit multiply and subtract.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned 32 x 32 -> 64-bit multiply and subtract.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit count, bit reverse and endian reverse.
+ void rbit(const Register& rd, const Register& rn);
+ void rev16(const Register& rd, const Register& rn);
+ void rev32(const Register& rd, const Register& rn);
+ void rev(const Register& rd, const Register& rn);
+ void clz(const Register& rd, const Register& rn);
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+
+ // Load literal from pc + offset_from_pc.
+ void LoadLiteral(const CPURegister& rt, int offset_from_pc);
+
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load literal to register.
+ void ldr(const Register& rt, uint64_t imm);
+
+ // Load literal to FP register.
+ void ldr(const FPRegister& ft, double imm);
+ void ldr(const FPRegister& ft, float imm);
+
+ // Move instructions. The default shift of -1 indicates that the move
+ // instruction will calculate an appropriate 16-bit immediate and left shift
+ // such that the shifted value equals the 64-bit immediate argument. If an
+ // explicit left shift is specified (0, 16, 32 or 48), the immediate must be
+ // a 16-bit value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+ // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant half word.
+
+ // Move and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move wide with NOT (the shifted immediate is bitwise inverted).
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move with zero.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
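+
+ // Illustrative sketch: an arbitrary 64-bit immediate can be materialized
+ // with one movz followed by up to three movk instructions, one per 16-bit
+ // half word. For example:
+ //   movz(x0, 0xDEF0);      // x0 = 0x000000000000DEF0
+ //   movk(x0, 0x9ABC, 16);  // x0 = 0x000000009ABCDEF0
+ //   movk(x0, 0x5678, 32);  // x0 = 0x000056789ABCDEF0
+ //   movk(x0, 0x1234, 48);  // x0 = 0x123456789ABCDEF0
+ // Higher-level code will typically prefer MacroAssembler::Mov(), which
+ // picks a suitable sequence automatically.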
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move NOT(operand) to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System hint.
+ void hint(SystemHint code);
+
+ // Data memory barrier
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier
+ void isb();
+
+ // Alias for system instructions.
+ void nop() { hint(NOP); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ DEBUG_BREAK_NOP,
+ INTERRUPT_CODE_NOP,
+ FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
+ LAST_NOP_MARKER = INTERRUPT_CODE_NOP
+ };
+
+ void nop(NopMarkerTypes n) {
+ ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
+ }
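+
+ // For illustration: DEBUG_BREAK_NOP is the first enumerator, so
+ // nop(DEBUG_BREAK_NOP) assembles to "mov x0, x0" and
+ // nop(INTERRUPT_CODE_NOP) to "mov x1, x1". Both have no architectural
+ // effect but can later be recognized by the code generator.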
+
+ // FP instructions.
+ // Move immediate to FP register.
+ void fmov(FPRegister fd, double imm);
+ void fmov(FPRegister fd, float imm);
+
+ // Move FP register to register.
+ void fmov(Register rd, FPRegister fn);
+
+ // Move register to FP register.
+ void fmov(FPRegister fd, Register rn);
+
+ // Move FP register to FP register.
+ void fmov(FPRegister fd, FPRegister fn);
+
+ // FP add.
+ void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP subtract.
+ void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP multiply.
+ void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP fused multiply and add.
+ void fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply and subtract.
+ void fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, add and negate.
+ void fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP fused multiply, subtract and negate.
+ void fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+
+ // FP divide.
+ void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum.
+ void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum.
+ void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP maximum number.
+ void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP minimum number.
+ void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
+
+ // FP absolute.
+ void fabs(const FPRegister& fd, const FPRegister& fn);
+
+ // FP negate.
+ void fneg(const FPRegister& fd, const FPRegister& fn);
+
+ // FP square root.
+ void fsqrt(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to away).
+ void frinta(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (nearest with ties to even).
+ void frintn(const FPRegister& fd, const FPRegister& fn);
+
+ // FP round to integer (towards zero).
+ void frintz(const FPRegister& fd, const FPRegister& fn);
+
+ // FP compare registers.
+ void fcmp(const FPRegister& fn, const FPRegister& fm);
+
+ // FP compare immediate.
+ void fcmp(const FPRegister& fn, double value);
+
+ // FP conditional compare.
+ void fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+
+ // Common FP convert function.
+ void FPConvertToInt(const Register& rd,
+ const FPRegister& fn,
+ FPIntegerConvertOp op);
+
+ // FP convert between single and double precision.
+ void fcvt(const FPRegister& fd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to away).
+ void fcvtau(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to away).
+ void fcvtas(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards -infinity).
+ void fcvtmu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards -infinity).
+ void fcvtms(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (nearest with ties to even).
+ void fcvtnu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (nearest with ties to even).
+ void fcvtns(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to unsigned integer (round towards zero).
+ void fcvtzu(const Register& rd, const FPRegister& fn);
+
+ // Convert FP to signed integer (round towards zero).
+ void fcvtzs(const Register& rd, const FPRegister& fn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
+
+ // Instruction functions used only for test, debug, and patching.
+ // Emit raw instructions in the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 8 bits of data in the instruction stream.
+ void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 32 bits of data in the instruction stream.
+ void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
+
+ // Emit 64 bits of data in the instruction stream.
+ void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+
+ // Copy a string into the instruction stream, including the terminating NULL
+ // character. The instruction pointer (pc_) is then aligned correctly for
+ // subsequent instructions.
+ void EmitStringData(const char * string) {
+ size_t len = strlen(string) + 1;
+ ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ EmitData(string, len);
+ // Pad with NULL characters until pc_ is aligned.
+ const char pad[] = {'\0', '\0', '\0', '\0'};
+ STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+ byte* next_pc = AlignUp(pc_, kInstructionSize);
+ EmitData(&pad, next_pc - pc_);
+ }
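+
+ // Worked example (assuming the 4-byte A64 instruction size and an aligned
+ // pc_ on entry): EmitStringData("hello") emits the six bytes
+ // 'h' 'e' 'l' 'l' 'o' '\0' followed by two bytes of NULL padding, leaving
+ // pc_ aligned again after a total of eight bytes.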
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Parameters are described in arm64/instructions-arm64.h.
+ void debug(const char* message, uint32_t code, Instr params = BREAK);
+
+ // Required by V8.
+ void dd(uint32_t data) { dc32(data); }
+ void db(uint8_t data) { dc8(data); }
+
+ // Code generation helpers --------------------------------------------------
+
+ unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+
+ Instruction* InstructionAt(int offset) const {
+ return reinterpret_cast<Instruction*>(buffer_ + offset);
+ }
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr Ra(CPURegister ra) {
+ ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
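+
+ // Background note: in A64 encodings, register number 31 denotes either the
+ // zero register or the stack pointer depending on the instruction. The
+ // plain Rd/Rn helpers above therefore reject the assembler's internal sp
+ // code, while RdSP/RnSP reject the zero register and mask the internal sp
+ // code down to the architectural register number.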
+
+ // Flags encoding.
+ inline static Instr Flags(FlagsUpdate S);
+ inline static Instr Cond(Condition cond);
+
+ // PC-relative address encoding.
+ inline static Instr ImmPCRelAddress(int imm21);
+
+ // Branch encoding.
+ inline static Instr ImmUncondBranch(int imm26);
+ inline static Instr ImmCondBranch(int imm19);
+ inline static Instr ImmCmpBranch(int imm19);
+ inline static Instr ImmTestBranch(int imm14);
+ inline static Instr ImmTestBranchBit(unsigned bit_pos);
+
+ // Data Processing encoding.
+ inline static Instr SF(Register rd);
+ inline static Instr ImmAddSub(int64_t imm);
+ inline static Instr ImmS(unsigned imms, unsigned reg_size);
+ inline static Instr ImmR(unsigned immr, unsigned reg_size);
+ inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
+ inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
+ inline static Instr ImmLLiteral(int imm19);
+ inline static Instr BitN(unsigned bitn, unsigned reg_size);
+ inline static Instr ShiftDP(Shift shift);
+ inline static Instr ImmDPShift(unsigned amount);
+ inline static Instr ExtendMode(Extend extend);
+ inline static Instr ImmExtendShift(unsigned left_shift);
+ inline static Instr ImmCondCmp(unsigned imm);
+ inline static Instr Nzcv(StatusFlags nzcv);
+
+ // MemOperand offset encoding.
+ inline static Instr ImmLSUnsigned(int imm12);
+ inline static Instr ImmLS(int imm9);
+ inline static Instr ImmLSPair(int imm7, LSDataSize size);
+ inline static Instr ImmShiftLS(unsigned shift_amount);
+ inline static Instr ImmException(int imm16);
+ inline static Instr ImmSystemRegister(int imm15);
+ inline static Instr ImmHint(int imm7);
+ inline static Instr ImmBarrierDomain(int imm2);
+ inline static Instr ImmBarrierType(int imm2);
+ inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
+
+ // Move immediates encoding.
+ inline static Instr ImmMoveWide(uint64_t imm);
+ inline static Instr ShiftMoveWide(int64_t shift);
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+ inline static Instr FPScale(unsigned scale);
+
+ // FP register type.
+ inline static Instr FPType(FPRegister fd);
+
+ // Scope class for postponing constant pool generation.
+ class BlockConstPoolScope {
+ public:
+ explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockConstPool();
+ }
+ ~BlockConstPoolScope() {
+ assem_->EndBlockConstPool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+ };
+
+ // Check if it is time to emit a constant pool.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ // Returns true if we should emit a veneer as soon as possible for a branch
+ // which can at most reach the specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
+ }
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be extended
+ // in the future, for example, if we decide to add nops between the veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ void RecordVeneerPool(int location_offset, int size);
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch jumping
+ // over the code.
+ void EmitVeneers(bool force_emit, bool need_protection,
+ int margin = kVeneerDistanceMargin);
+ void EmitVeneersGuard() { EmitPoolGuard(); }
+ // Checks whether veneers need to be emitted at this point.
+ // If force_emit is set, a veneer is generated for *all* unresolved branches.
+ void CheckVeneerPool(bool force_emit, bool require_jump,
+ int margin = kVeneerDistanceMargin);
+
+
+ class BlockPoolsScope {
+ public:
+ explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockPools();
+ }
+ ~BlockPoolsScope() {
+ assem_->EndBlockPools();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
+ };
+
+ // Available for constrained code generation scopes. Prefer
+ // MacroAssembler::Mov() when possible.
+ inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
+
+ protected:
+ inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
+
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+ static bool IsImmLSUnscaled(ptrdiff_t offset);
+ static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+ void Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ void LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ static bool IsImmConditionalCompare(int64_t immediate);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+ static bool IsImmAddSub(int64_t immediate);
+
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+
+ // Remove the specified branch from the unbound label link chain.
+ // If available, a veneer for this label can be used for other branches in the
+ // chain if the link chain cannot be fixed up without this branch.
+ void RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer = NULL);
+
+ private:
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ void DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ // Register the relocation information for the operand and load its value
+ // into rt.
+ void LoadRelocatedValue(const CPURegister& rt,
+ const Operand& operand,
+ LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const FPRegister& fd,
+ const FPRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing2Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ FPDataProcessing2SourceOp op);
+ void FPDataProcessing3Source(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa,
+ FPDataProcessing3SourceOp op);
+
+ // Label helpers.
+
+ // Return an offset for a label-referencing instruction, typically a branch.
+ int LinkAndGetByteOffsetTo(Label* label);
+
+ // This is the same as LinkAndGetByteOffsetTo, but returns an offset
+ // suitable for fields that take instruction offsets.
+ inline int LinkAndGetInstructionOffsetTo(Label* label);
+
+ static const int kStartOfLabelLinkChain = 0;
+
+ // Verify that a label's link chain is intact.
+ void CheckLabelLinkChain(Label const * label);
+
+ void RecordLiteral(int64_t imm, unsigned size);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Emit the instruction at pc_.
+ void Emit(Instr instruction) {
+ STATIC_ASSERT(sizeof(*pc_) == 1);
+ STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+
+ memcpy(pc_, &instruction, sizeof(instruction));
+ pc_ += sizeof(instruction);
+ CheckBuffer();
+ }
+
+ // Emit data inline in the instruction stream.
+ void EmitData(void const * data, unsigned size) {
+ ASSERT(sizeof(*pc_) == 1);
+ ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+
+ // TODO(all): Somehow register that we have some data here. Then we can
+ // disassemble it correctly.
+ memcpy(pc_, data, size);
+ pc_ += size;
+ CheckBuffer();
+ }
+
+ void GrowBuffer();
+ void CheckBuffer();
+
+ // Pc offset of the next constant pool check.
+ int next_constant_pool_check_;
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+ // Constants in the pool may be addresses of functions that get relocated;
+ // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstPoolIntervalInst = 128;
+ static const int kCheckConstPoolInterval =
+ kCheckConstPoolIntervalInst * kInstructionSize;
+
+ // Constants in pools are accessed via pc-relative addressing, which can
+ // reach +/-4KB, thereby defining a maximum distance between the instruction
+ // and the accessed constant.
+ static const int kMaxDistToConstPool = 4 * KB;
+ static const int kMaxNumPendingRelocInfo =
+ kMaxDistToConstPool / kInstructionSize;
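+
+ // Worked example (assuming the 4-byte A64 instruction size): the constant
+ // pool is re-checked every 128 instructions, i.e. every 512 bytes of code,
+ // and the 4KB addressing window can cover at most 4096 / 4 = 1024 pending
+ // entries.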
+
+
+ // Average distance between a constant pool and the first instruction
+ // accessing the constant pool. A longer distance should result in less
+ // I-cache pollution.
+ // In practice, the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToConstPool =
+ kMaxDistToConstPool - kCheckConstPoolInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences.
+ int const_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_const_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
+
+ // Emission of the veneer pools may be blocked in some code sequences.
+ int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+
+ // The buffer of pending relocation info.
+ RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+ // Number of pending reloc info entries in the buffer.
+ int num_pending_reloc_info_;
+
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ TypeFeedbackId recorded_ast_id_;
+
+ inline TypeFeedbackId RecordedAstId();
+ inline void ClearRecordedAstId();
+
+ protected:
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void SetRecordedAstId(TypeFeedbackId ast_id) {
+ ASSERT(recorded_ast_id_.IsNone());
+ recorded_ast_id_ = ast_id;
+ }
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries, and debug strings encoded in the instruction
+ // stream.
+ static const int kGap = 128;
+
+ public:
+ class FarBranchInfo {
+ public:
+ FarBranchInfo(int offset, Label* label)
+ : pc_offset_(offset), label_(label) {}
+ // Offset of the branch in the code generation buffer.
+ int pc_offset_;
+ // The label branched to.
+ Label* label_;
+ };
+
+ protected:
+ // Information about unresolved (forward) branches.
+ // The Assembler is only allowed to delete out-of-date information from here
+ // after a label is bound. The MacroAssembler uses this information to
+ // generate veneers.
+ //
+ // The first member of each pair (the key) is the maximum offset that the
+ // branch can reach in the buffer; the second member describes the unresolved
+ // branch itself. The map is sorted according to this reachable offset, making
+ // it easy to check when veneers need to be emitted.
+ // Note that the maximum reachable offset (first member of the pairs) should
+ // always be positive but has the same type as the return value for
+ // pc_offset() for convenience.
+ std::multimap<int, FarBranchInfo> unresolved_branches_;
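+
+ // For illustration: a forward branch emitted at buffer offset N with a
+ // maximum forward range of R bytes is keyed near N + R, so
+ // unresolved_branches_.begin() always gives the earliest offset by which
+ // some veneer must have been emitted.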
+
+ // We generate a veneer for a branch if we reach within this distance of the
+ // limit of the range.
+ static const int kVeneerDistanceMargin = 1 * KB;
+ // The factor of 2 is a finger in the air guess. With a default margin of
+ // 1KB, that leaves us an additional 256 instructions to avoid generating a
+ // protective branch.
+ static const int kVeneerNoProtectionFactor = 2;
+ static const int kVeneerDistanceCheckMargin =
+ kVeneerNoProtectionFactor * kVeneerDistanceMargin;
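+ // Worked example (assuming 4-byte instructions): with a 1KB margin and a
+ // factor of 2, kVeneerDistanceCheckMargin is 2KB; the extra 1KB is the
+ // additional 256 instructions mentioned above (1024 / 4 = 256).
+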
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
+ // This is similar to next_constant_pool_check_ and helps reduce the overhead
+ // of checking for veneer pools.
+ // It is kept equal to the closest unresolved branch limit minus the maximum
+ // veneer margin (or kMaxInt if there are no unresolved branches).
+ int next_veneer_pool_check_;
+
+ private:
+ // If a veneer is emitted for a branch instruction, that instruction must be
+ // removed from the associated label's link chain so that the assembler does
+ // not later attempt (likely unsuccessfully) to patch it to branch directly to
+ // the label.
+ void DeleteUnresolvedBranchInfoForLabel(Label* label);
+
+ private:
+ PositionsRecorder positions_recorder_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+class PatchingAssembler : public Assembler {
+ public:
+ // Create an Assembler with a buffer starting at 'start'.
+ // The buffer size is
+ // size of instructions to patch + kGap
+ // where kGap is the distance at which the Assembler tries to grow the
+ // buffer.
+ // If more or fewer instructions than expected are generated or if some
+ // relocation information takes space in the buffer, the PatchingAssembler
+ // will crash trying to grow the buffer.
+ PatchingAssembler(Instruction* start, unsigned count)
+ : Assembler(NULL,
+ reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
+ StartBlockPools();
+ }
+
+ PatchingAssembler(byte* start, unsigned count)
+ : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ // Block constant pool emission.
+ StartBlockPools();
+ }
+
+ ~PatchingAssembler() {
+ // Const pool should still be blocked.
+ ASSERT(is_const_pool_blocked());
+ EndBlockPools();
+ // Verify we have generated the number of instructions we expected.
+ ASSERT((pc_offset() + kGap) == buffer_size_);
+ // Verify no relocation information has been emitted.
+ ASSERT(num_pending_reloc_info() == 0);
+ // Flush the instruction cache.
+ size_t length = buffer_size_ - kGap;
+ CPU::FlushICache(buffer_, length);
+ }
+};
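+
+// Illustrative usage sketch (the names below are hypothetical): to overwrite
+// a single existing instruction in place, one could write
+//   PatchingAssembler patcher(target, 1);  // 'target' is an Instruction*.
+//   patcher.brk(0);                        // Emit exactly one instruction.
+// The destructor then verifies the instruction count and flushes the
+// instruction cache.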
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
new file mode 100644
index 000000000..01ac4cc5d
--- /dev/null
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -0,0 +1,1562 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the native context.
+ __ Ldr(result, GlobalObjectMemOperand());
+ __ Ldr(result,
+ FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+ // Load the InternalArray function from the native context.
+ __ Ldr(result, ContextMemOperand(result,
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+}
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument (argc == x0)
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ Push(x1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects x0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Add(x0, x0, num_extra_args + 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_InternalArrayCode");
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ InternalArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ArrayCode");
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, x1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Tst(x10, kSmiTagMask);
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
+ __ CompareObjectType(x10, x11, x12, MAP_TYPE);
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ ArrayConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_StringConstructCode");
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
+
+ Register argc = x0;
+ Register function = x1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
+ __ Cmp(function, x10);
+ __ Assert(eq, kUnexpectedStringFunction);
+ }
+
+ // Load the first argument into x0 and get rid of the rest.
+ Label no_arguments;
+ __ Cbz(argc, &no_arguments);
+ // First arg = sp[(argc - 1) * 8].
+ __ Sub(argc, argc, 1);
+ __ Claim(argc, kXRegSize);
+ // jssp now points to args[0]; load and drop args[0] + receiver.
+ Register arg = argc;
+ __ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ argc = NoReg;
+
+ Register argument = x2;
+ Label not_cached, argument_is_string;
+ __ LookupNumberStringCache(arg, // Input.
+ argument, // Result.
+ x10, // Scratch.
+ x11, // Scratch.
+ x12, // Scratch.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
+ __ Bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- x2 : argument converted to string
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label gc_required;
+ Register new_obj = x0;
+ __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+
+ // Initialize the String object.
+ Register map = x3;
+ __ LoadGlobalFunctionInitialMap(function, map, x10);
+ if (FLAG_debug_code) {
+ __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+ __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Cmp(x4, 0);
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ }
+ __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
+
+ Register empty = x3;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+ __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+
+ __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ Bind(&not_cached);
+ __ JumpIfSmi(arg, &convert_argument);
+
+ // Is it a String?
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
+ __ Mov(argument, arg);
+ __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
+ __ B(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into x2.
+ __ Bind(&convert_argument);
+ __ Push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(arg);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
+ __ Pop(function);
+ __ Mov(argument, x0);
+ __ B(&argument_is_string);
+
+ // Load the empty string into x2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ Bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ B(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to create a
+ // string wrapper.
+ __ Bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
+ __ Ret();
+}
+
+
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // - Push a copy of the function onto the stack.
+ // - Push another copy as a parameter to the runtime call.
+ __ Push(x1, x1);
+
+ __ CallRuntime(function_id, 1);
+
+ // - Restore receiver.
+ __ Pop(x1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+ __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However, not
+ // checking may delay installing ready functions, and always checking would be
+ // quite expensive. A good compromise is to first check against the stack
+ // limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+ GenerateTailCallToReturnedCode(masm);
+
+ __ Bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions,
+ bool create_memento) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x2 : allocation site or undefined
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
+ Isolate* isolate = masm->isolate();
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the three incoming parameters on the stack.
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(x2, x10);
+ __ Push(x2);
+ }
+
+ Register argc = x0;
+ Register constructor = x1;
+ // x1: constructor function
+ __ SmiTag(argc);
+ __ Push(argc, constructor);
+ // sp[0] : Constructor function.
+ // sp[1]: number of arguments (smi-tagged)
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#if ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ Mov(x2, Operand(debug_step_in_fp));
+ __ Ldr(x2, MemOperand(x2));
+ __ Cbnz(x2, &rt_call);
+#endif
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc); in that case the initial
+ // map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ Ldr(x3, FieldMemOperand(constructor,
+ JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
+ __ Ldrb(x4, constructor_count);
+ __ Subs(x4, x4, 1);
+ __ Strb(x4, constructor_count);
+ __ B(ne, &allocate);
+
+ // Push the constructor and map to the stack, and the constructor again
+ // as argument to the runtime call.
+ __ Push(constructor, init_map, constructor);
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+ __ Pop(init_map, constructor);
+ __ Bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x3;
+ Register new_obj = x4;
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Add(x7, obj_size,
+ Operand(AllocationMemento::kSize / kPointerSize));
+ __ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ } else {
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ }
+
+ // The JSObject has been allocated; now initialize the fields. The map is
+ // set to the initial map, and the properties and elements are set to the
+ // empty fixed array.
+ // NB: the object pointer is not tagged, so MemOperand is used.
+ Register empty = x5;
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
+ STATIC_ASSERT(JSObject::kElementsOffset ==
+ (JSObject::kPropertiesOffset + kPointerSize));
+ __ Stp(empty, empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ Register first_prop = x5;
+ __ Add(first_prop, new_obj, JSObject::kHeaderSize);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register undef = x7;
+ __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
+
+ // Obtain number of pre-allocated property fields and in-object
+ // properties.
+ Register prealloc_fields = x10;
+ Register inobject_props = x11;
+ Register inst_sizes = x11;
+ __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(prealloc_fields, inst_sizes,
+ Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Ubfx(inobject_props, inst_sizes,
+ Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+
+ // Calculate number of property fields in the object.
+ Register prop_fields = x6;
+ __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
+
+ if (count_constructions) {
+ // Fill the pre-allocated fields with undef.
+ __ FillFields(first_prop, prealloc_fields, undef);
+
+ // Register first_non_prealloc is the offset of the first field after
+ // pre-allocated fields.
+ Register first_non_prealloc = x12;
+ __ Add(first_non_prealloc, first_prop,
+ Operand(prealloc_fields, LSL, kPointerSizeLog2));
+
+ first_prop = NoReg;
+
+ if (FLAG_debug_code) {
+ Register obj_end = x5;
+ __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ Cmp(first_non_prealloc, obj_end);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+
+ // Fill the remaining fields with one pointer filler map.
+ Register one_pointer_filler = x5;
+ Register non_prealloc_fields = x6;
+ __ LoadRoot(one_pointer_filler, Heap::kOnePointerFillerMapRootIndex);
+ __ Sub(non_prealloc_fields, prop_fields, prealloc_fields);
+ __ FillFields(first_non_prealloc, non_prealloc_fields,
+ one_pointer_filler);
+ prop_fields = NoReg;
+ } else if (create_memento) {
+ // Fill the pre-allocated fields with undef.
+ __ FillFields(first_prop, prop_fields, undef);
+ __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
+ __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ Peek(x14, 2 * kXRegSize);
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
+ first_prop = NoReg;
+ } else {
+ // Fill all of the property fields with undef.
+ __ FillFields(first_prop, prop_fields, undef);
+ first_prop = NoReg;
+ prop_fields = NoReg;
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not, or fall through to runtime call if it is.
+ Register element_count = x3;
+ __ Ldrb(element_count,
+ FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
+ // The instance sizes field contains both pre-allocated property fields
+ // and in-object properties.
+ __ Add(element_count, element_count, prealloc_fields);
+ __ Subs(element_count, element_count, inobject_props);
+
+ // Done if no extra properties are to be allocated.
+ __ B(eq, &allocated);
+ __ Assert(pl, kPropertyAllocationCountFailed);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ Register new_array = x5;
+ Register array_size = x6;
+ __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
+ SIZE_IN_WORDS));
+
+ Register array_map = x10;
+ __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
+ __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
+ __ SmiTag(x0, element_count);
+ __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
+
+ // Initialize the fields to undefined.
+ Register elements = x10;
+ __ Add(elements, new_array, FixedArray::kHeaderSize);
+ __ FillFields(elements, element_count, undef);
+
+ // Store the initialized FixedArray into the properties field of the
+ // JSObject.
+ __ Add(new_array, new_array, kHeapObjectTag);
+ __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ __ Bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(new_obj, x14);
+ }
+
+ // Allocate the new receiver object using the runtime call.
+ __ Bind(&rt_call);
+ Label count_incremented;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 2 * kXRegSize);
+ __ Push(x4);
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ __ Mov(x4, x0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do the create count
+ // increment.
+ __ jmp(&count_incremented);
+ } else {
+ __ Push(constructor); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ __ Mov(x4, x0);
+ }
+
+ // Receiver for constructor call allocated.
+ // x4: JSObject
+ __ Bind(&allocated);
+
+ if (create_memento) {
+ __ Peek(x10, 2 * kXRegSize);
+ __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
+ // x10 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ Ldr(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Add(x5, x5, Operand(Smi::FromInt(1)));
+ __ Str(x5, FieldMemOperand(x10,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
+ __ Push(x4, x4);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ __ Peek(constructor, 2 * kXRegSize); // Load constructor.
+ __ Peek(argc, 3 * kXRegSize); // Load number of arguments.
+ __ SmiUntag(argc);
+
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+ // Copy 2 values every loop to use ldp/stp.
+ // x0: number of arguments
+ // x1: constructor function
+ // x2: address of last argument (caller sp)
+ // jssp[0]: receiver
+ // jssp[1]: receiver
+ // jssp[2]: constructor function
+ // jssp[3]: number of arguments (smi-tagged)
+ // Compute the start address of the copy in x3.
+ __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x3, x2);
+ __ B(gt, &loop);
+ // Because we copied values 2 by 2 we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ // x0: number of arguments
+ // x1: constructor function
+ if (is_api_function) {
+ __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore the context from the frame.
+ // x0: result
+ // jssp[0]: receiver
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: constructor function
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSize);
+
+ // Leave construct frame.
+ }
+
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true, false);
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true, false, false);
+}
+
+
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody().
+ Register function = x1;
+ Register receiver = x2;
+ Register argc = x3;
+ Register argv = x4;
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Clear the context before we push it when entering the internal frame.
+ __ Mov(cp, 0);
+
+ {
+ // Enter an internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Set up the context from the function argument.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ __ InitializeRootRegister();
+
+ // Push the function and the receiver onto the stack.
+ __ Push(function, receiver);
+
+ // Copy arguments to the stack in a loop, in reverse order.
+ // x3: argc.
+ // x4: argv.
+ Label loop, entry;
+ // Compute the copy end address.
+ __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
+ __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
+ __ Push(x12); // Push the argument.
+ __ Bind(&entry);
+ __ Cmp(x10, argv);
+ __ B(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ // The original values have been saved in JSEntryStub::GenerateBody().
+ __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
+ __ Mov(x20, x19);
+ __ Mov(x21, x19);
+ __ Mov(x22, x19);
+ __ Mov(x23, x19);
+ __ Mov(x24, x19);
+ __ Mov(x25, x19);
+ // Don't initialize the reserved registers.
+ // x26 : root register (root).
+ // x27 : context pointer (cp).
+ // x28 : JS stack pointer (jssp).
+ // x29 : frame pointer (fp).
+
+ __ Mov(x0, argc);
+ if (is_construct) {
+ // No type feedback cell is available.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
+ } else {
+ ParameterCount actual(x0);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ }
+ // Exit the JS internal frame and remove the parameters (except function),
+ // and return.
+ }
+
+ // Result is in x0. Return.
+ __ Ret();
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register function = x1;
+
+ // Preserve function. At the same time, push arguments for
+ // kHiddenCompileOptimized.
+ __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
+ __ Push(function, function, x10);
+
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+
+ // Restore receiver.
+ __ Pop(function);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ CallCompileOptimized(masm, false);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ CallCompileOptimized(masm, true);
+ GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code fast, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+ }
+
+ // The calling function has been made young again, so return to execute the
+ // real frame set-up code.
+ __ Br(x0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following caller-saved registers must be saved and restored when
+ // calling through to the runtime:
+ // x0 - The address from which to resume execution.
+ // x1 - isolate
+ // lr - The return address for the JSFunction itself. It has not yet been
+ // preserved on the stack because the frame setup code was replaced
+ // with a call to this stub, to handle code ageing.
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0, x1, fp, lr);
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ Pop(lr, fp, x1, x0);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ EmitFrameSetupForCodeAgePatching(masm);
+ }
+
+ // Jump to point after the code-age stub.
+ __ Add(x0, x0, kCodeAgeSequenceSize);
+ __ Br(x0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ // TODO(jbramley): Is it correct (and appropriate) to use safepoint
+ // registers here? According to the comment above, we should only need to
+ // preserve the registers with parameters.
+ __ PushXRegList(kSafepointSavedRegisters);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+ __ PopXRegList(kSafepointSavedRegisters);
+ }
+
+ // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
+ __ Drop(1);
+
+ // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
+ // into lr before it jumps here.
+ __ Br(lr);
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+ Deoptimizer::BailoutType type) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+ }
+
+ // Get the full codegen state from the stack and untag it.
+ Register state = x6;
+ __ Peek(state, 0);
+ __ SmiUntag(state);
+
+ // Switch on the state.
+ Label with_tos_register, unknown_state;
+ __ CompareAndBranch(
+ state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ Drop(1); // Remove state.
+ __ Ret();
+
+ __ Bind(&with_tos_register);
+ // Reload TOS register.
+ __ Peek(x0, kPointerSize);
+ __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ Drop(2); // Remove state and TOS.
+ __ Ret();
+
+ __ Bind(&unknown_state);
+ __ Abort(kInvalidFullCodegenState);
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass function as argument.
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
+
+  // If the result is the smi zero (i.e. no optimized code object was
+  // produced), just return to the unoptimized code.
+ Label skip;
+ __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+ __ Ret();
+
+ __ Bind(&skip);
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add(x0, x0, x1);
+ __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&ok);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ enum {
+ call_type_JS_func = 0,
+ call_type_func_proxy = 1,
+ call_type_non_func = 2
+ };
+ Register argc = x0;
+ Register function = x1;
+ Register call_type = x4;
+ Register scratch1 = x10;
+ Register scratch2 = x11;
+ Register receiver_type = x13;
+
+ ASM_LOCATION("Builtins::Generate_FunctionCall");
+ // 1. Make sure we have at least one argument.
+ { Label done;
+ __ Cbnz(argc, &done);
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch1);
+ __ Mov(argc, 1);
+ __ Bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ Label slow, non_function;
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(function, &non_function);
+ __ JumpIfNotObjectType(function, scratch1, receiver_type,
+ JS_FUNCTION_TYPE, &slow);
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ Label shift_arguments;
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+    // Also do not transform the receiver for native functions (the compiler
+    // hints are loaded into scratch2 just below).
+ __ Ldr(scratch1,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch2.W(),
+ FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ scratch2.W(),
+ (1 << SharedFunctionInfo::kStrictModeFunction) |
+ (1 << SharedFunctionInfo::kNative),
+ &shift_arguments);
+
+ // Compute the receiver in sloppy mode.
+ Register receiver = x2;
+ __ Sub(scratch1, argc, 1);
+ __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(receiver, &convert_to_object);
+
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, scratch1, scratch2,
+ FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
+
+ __ Bind(&convert_to_object);
+
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(argc);
+
+ __ Push(argc, receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+
+ __ Pop(argc);
+ __ SmiUntag(argc);
+
+ // Exit the internal frame.
+ }
+
+ // Restore the function and flag in the registers.
+ __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
+ __ Mov(call_type, static_cast<int>(call_type_JS_func));
+ __ B(&patch_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(receiver, GlobalObjectMemOperand());
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&patch_receiver);
+ __ Sub(scratch1, argc, 1);
+ __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ __ B(&shift_arguments);
+ }
+
+ // 3b. Check for function proxy.
+ __ Bind(&slow);
+ __ Mov(call_type, static_cast<int>(call_type_func_proxy));
+ __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(eq, &shift_arguments);
+ __ Bind(&non_function);
+ __ Mov(call_type, static_cast<int>(call_type_non_func));
+
+ // 3c. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Sub(scratch1, argc, 1);
+ __ Poke(function, Operand(scratch1, LSL, kXRegSizeLog2));
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ __ Bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is jssp.
+ __ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
+ __ Sub(scratch1, scratch2, kPointerSize);
+
+ __ Bind(&loop);
+ __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex));
+ __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex));
+ __ Cmp(scratch1, jssp);
+ __ B(ge, &loop);
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Sub(argc, argc, 1);
+ __ Drop(1);
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
+ // call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label js_function, non_proxy;
+ __ Cbz(call_type, &js_function);
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ Mov(x2, 0);
+ __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
+ __ B(ne, &non_proxy);
+
+ __ Push(function); // Re-add proxy object as additional argument.
+ __ Add(argc, argc, 1);
+ __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ Bind(&non_proxy);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&js_function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+  // (tail-call) to the code in register x3 without checking arguments.
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x2,
+ FieldMemOperand(x3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Label dont_adapt_args;
+ __ Cmp(x2, argc); // Check formal and actual parameter counts.
+ __ B(eq, &dont_adapt_args);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ Bind(&dont_adapt_args);
+
+ __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(0);
+ __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kReceiverOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
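+  // kIndexOffset and kLimitOffset address the two expression-stack slots,
+  // pushed further down, that hold the current copy index and the argument
+  // count (limit).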
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ Register args = x12;
+ Register receiver = x14;
+ Register function = x15;
+
+ // Get the length of the arguments via a builtin call.
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Push(function, args);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
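+    // Note that argc is a smi, so shifting it right by
+    // (kSmiShift - kPointerSizeLog2) yields the number of bytes the arguments
+    // will occupy on the stack.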
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the APPLY_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+ // Push current limit and index.
+ __ Mov(x1, 0); // Initial index.
+ __ Push(argc, x1);
+
+ Label push_receiver;
+ __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
+
+ // Check that the function is a JS function. Otherwise it must be a proxy.
+ // When it is not the function proxy will be invoked later.
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
+ &push_receiver);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Load the shared function info.
+ __ Ldr(x2, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute and push the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label convert_receiver_to_object, use_global_receiver;
+ __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
+ // Do not transform the receiver for native functions.
+ __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
+
+ // Compute the receiver in sloppy mode.
+ __ JumpIfSmi(receiver, &convert_receiver_to_object);
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
+ &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
+ &push_receiver, ge);
+
+ // Call a builtin to convert the receiver to a regular object.
+ __ Bind(&convert_receiver_to_object);
+ __ Push(receiver);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(receiver, x0);
+ __ B(&push_receiver);
+
+ __ Bind(&use_global_receiver);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver
+ __ Bind(&push_receiver);
+ __ Push(receiver);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, kArgsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+    // Load the current index, increment it, and store it back.
+ __ Ldr(current, MemOperand(fp, kIndexOffset));
+ __ Add(current, current, Smi::FromInt(1));
+ __ Str(current, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, kLimitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // At the end of the loop, the number of arguments is stored in 'current',
+ // represented as a smi.
+
+ function = x1; // From now on we want the function to be kept in x1;
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ Label call_proxy;
+ ParameterCount actual(current);
+ __ SmiUntag(current);
+ __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
+ frame_scope.GenerateLeaveFrame();
+ __ Drop(3);
+ __ Ret();
+
+ // Call the function proxy.
+ __ Bind(&call_proxy);
+ // x0 : argc
+ // x1 : function
+ __ Push(function); // Add function proxy as last argument.
+ __ Add(x0, x0, 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+ __ Drop(3);
+ __ Ret();
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
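+  // Build an arguments adaptor frame: save lr and fp, push the frame type
+  // marker, the function and the smi-tagged argument count, then point fp
+  // into the new frame.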
+ __ SmiTag(x10, x0);
+ __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Push(lr, fp);
+ __ Push(x11, x1, x10);
+ __ Add(fp, jssp,
+ StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then drop the parameters and the receiver.
+ __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ __ DropBySMI(x10, kXRegSize);
+ __ Drop(1);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
+ // ----------- S t a t e -------------
+ // -- x0 : actual number of arguments
+ // -- x1 : function (passed through to callee)
+ // -- x2 : expected number of arguments
+ // -----------------------------------
+
+ Register argc_actual = x0; // Excluding the receiver.
+ Register argc_expected = x2; // Excluding the receiver.
+ Register function = x1;
+ Register code_entry = x3;
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Cmp(argc_actual, argc_expected);
+ __ B(lt, &too_few);
+ __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+ __ B(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_start = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
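+    // Convert the expected argument count into a byte offset for the copy.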
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_start, fp, 3 * kPointerSize);
+ __ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
+ __ Sub(copy_end, copy_start, argc_expected);
+ __ Sub(copy_end, copy_end, kPointerSize);
+ __ Mov(copy_to, jssp);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_start, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+
+ __ B(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ Bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ Register copy_from = x10;
+ Register copy_end = x11;
+ Register copy_to = x12;
+ Register scratch1 = x13, scratch2 = x14;
+
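+    // Convert both argument counts into byte offsets for the copy and fill.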
+ __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+ __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
+
+ // Adjust for fp, lr, and the receiver.
+ __ Add(copy_from, fp, 3 * kPointerSize);
+ __ Add(copy_from, copy_from, argc_actual);
+ __ Mov(copy_to, jssp);
+ __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
+ __ Sub(copy_end, copy_end, argc_actual);
+
+ // Claim space for the arguments, the receiver, and one extra slot.
+ // The extra slot ensures we do not write under jssp. It will be popped
+ // later.
+ __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Claim(scratch1, 1);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ Label copy_2_by_2;
+ __ Bind(&copy_2_by_2);
+ __ Ldp(scratch1, scratch2,
+ MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+ __ Stp(scratch1, scratch2,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &copy_2_by_2);
+
+ __ Mov(copy_to, copy_end);
+
+ // Fill the remaining expected arguments with undefined.
+ __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
+ __ Add(copy_end, jssp, kPointerSize);
+
+ Label fill;
+ __ Bind(&fill);
+ __ Stp(scratch1, scratch1,
+ MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ __ Cmp(copy_to, copy_end);
+ __ B(hi, &fill);
+
+ // Correct the space allocated for the extra slot.
+ __ Drop(1);
+ }
+
+ // Arguments have been adapted. Now call the entry point.
+ __ Bind(&invoke);
+ __ Call(code_entry);
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+ // Call the entry point without adapting the arguments.
+ __ Bind(&dont_adapt_arguments);
+ __ Jump(code_entry);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
new file mode 100644
index 000000000..b097fc52e
--- /dev/null
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -0,0 +1,5743 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: function info
+ static Register registers[] = { x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
+}
+
+
+void FastNewContextStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: function
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
+}
+
+
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: array literals array
+ // x2: array literal index
+ // x1: constant elements
+ static Register registers[] = { x3, x2, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
+}
+
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x3: object literals array
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ static Register registers[] = { x3, x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
+}
+
+
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: feedback vector
+ // x3: call feedback slot
+ static Register registers[] = { x2, x3 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x0: key
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void RegExpConstructResultStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: length
+ // x1: index (of last match)
+ // x0: string
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
+}
+
+
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: receiver
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ static Register registers[] = { x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x0, x2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: receiver
+ // x1: key
+ // x0: value
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value (js_array)
+ // x1: to_map
+ static Register registers[] = { x0, x1 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+void CompareNilICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value to compare
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(CompareNilIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
+}
+
+
+static void InitializeArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: function
+ // x2: allocation site with elements kind
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x2, x0 };
+ static Register registers_no_args[] = { x1, x2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // The stack parameter count is needed (the constructor pointer and a
+    // single argument are passed on the stack).
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor,
+ int constant_stack_parameter_count) {
+ // x1: constructor function
+ // x0: number of arguments to the constructor function
+ static Register registers_variable_args[] = { x1, x0 };
+ static Register registers_no_args[] = { x1 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ =
+ sizeof(registers_no_args) / sizeof(registers_no_args[0]);
+ descriptor->register_params_ = registers_no_args;
+ } else {
+    // The stack parameter count is needed (the constructor pointer and a
+    // single argument are passed on the stack).
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = x0;
+ descriptor->register_param_count_ =
+ sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
+ descriptor->register_params_ = registers_variable_args;
+ }
+
+ descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
+ descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
+}
+
+
+void ToBooleanStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ static Register registers[] = { x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: receiver
+ // x2: key (unused)
+ // x0: value
+ static Register registers[] = { x1, x2, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x0: value
+ // x3: target map
+ // x1: key
+ // x2: receiver
+ static Register registers[] = { x0, x3, x1, x2 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x2: allocation site
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x2, x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
+}
+
+
+void StringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // x1: left operand
+ // x0: right operand
+ static Register registers[] = { x1, x0 };
+ descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
+}
+
+
+void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
+ static PlatformCallInterfaceDescriptor default_descriptor =
+ PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+ static PlatformCallInterfaceDescriptor noInlineDescriptor =
+ PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
+ static Register registers[] = { x1, // JSFunction
+ cp, // context
+ x0, // actual number of arguments
+ x2, // expected number of arguments
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // JSFunction
+ Representation::Tagged(), // context
+ Representation::Integer32(), // actual number of arguments
+ Representation::Integer32(), // expected number of arguments
+ };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::KeyedCall);
+ static Register registers[] = { cp, // context
+ x2, // key
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // key
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::NamedCall);
+ static Register registers[] = { cp, // context
+ x2, // name
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // name
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::CallHandler);
+ static Register registers[] = { cp, // context
+ x0, // receiver
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // context
+ Representation::Tagged(), // receiver
+ };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+ {
+ CallInterfaceDescriptor* descriptor =
+ isolate->call_descriptor(Isolate::ApiFunctionCall);
+ static Register registers[] = { x0, // callee
+ x4, // call_data
+ x2, // holder
+ x1, // api_function_address
+ cp, // context
+ };
+ static Representation representations[] = {
+ Representation::Tagged(), // callee
+ Representation::Tagged(), // call_data
+ Representation::Tagged(), // holder
+ Representation::External(), // api_function_address
+ Representation::Tagged(), // context
+ };
+ descriptor->register_param_count_ = 5;
+ descriptor->register_params_ = registers;
+ descriptor->param_representations_ = representations;
+ descriptor->platform_specific_descriptor_ = &default_descriptor;
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Isolate* isolate = masm->isolate();
+ isolate->counters()->code_stubs()->Increment();
+
+ CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
+ int param_count = descriptor->register_param_count_;
+ {
+ // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ASSERT((descriptor->register_param_count_ == 0) ||
+ x0.Is(descriptor->register_params_[param_count - 1]));
+
+ // Push arguments
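+    // Queue the parameter registers so they can be pushed in one batch.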
+ MacroAssembler::PushPopQueue queue(masm);
+ for (int i = 0; i < param_count; ++i) {
+ queue.Queue(descriptor->register_params_[i]);
+ }
+ queue.PushQueued();
+
+ ExternalReference miss = descriptor->miss_handler();
+ __ CallExternalReference(miss, descriptor->register_param_count_);
+ }
+
+ __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label done;
+ Register input = source();
+ Register result = destination();
+ ASSERT(is_truncating());
+
+ ASSERT(result.Is64Bits());
+ ASSERT(jssp.Is(masm->StackPointer()));
+
+ int double_offset = offset();
+
+ DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
+ Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
+ Register scratch2 =
+ GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
+
+ __ Push(scratch1, scratch2);
+ // Account for saved regs if input is jssp.
+ if (input.is(jssp)) double_offset += 2 * kPointerSize;
+
+ if (!skip_fastpath()) {
+ __ Push(double_scratch);
+ if (input.is(jssp)) double_offset += 1 * kDoubleSize;
+ __ Ldr(double_scratch, MemOperand(input, double_offset));
+ // Try to convert with a FPU convert instruction. This handles all
+ // non-saturating cases.
+ __ TryConvertDoubleToInt64(result, double_scratch, &done);
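+    // The fast conversion failed, so copy the raw bits of the double into
+    // 'result' for the manual conversion below.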
+ __ Fmov(result, double_scratch);
+ } else {
+ __ Ldr(result, MemOperand(input, double_offset));
+ }
+
+ // If we reach here we need to manually convert the input to an int32.
+
+ // Extract the exponent.
+ Register exponent = scratch1;
+ __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
+ HeapNumber::kExponentBits);
+
+  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
+ // the mantissa gets shifted completely out of the int32_t result.
+ __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
+ __ CzeroX(result, ge);
+ __ B(ge, &done);
+
+ // The Fcvtzs sequence handles all cases except where the conversion causes
+ // signed overflow in the int64_t target. Since we've already handled
+ // exponents >= 84, we can guarantee that 63 <= exponent < 84.
+
+ if (masm->emit_debug_code()) {
+ __ Cmp(exponent, HeapNumber::kExponentBias + 63);
+ // Exponents less than this should have been handled by the Fcvt case.
+ __ Check(ge, kUnexpectedValue);
+ }
+
+ // Isolate the mantissa bits, and set the implicit '1'.
+ Register mantissa = scratch2;
+ __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
+ __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
+
+ // Negate the mantissa if necessary.
+ __ Tst(result, kXSignMask);
+ __ Cneg(mantissa, mantissa, ne);
+
+  // Shift the mantissa bits into the correct place. We know that we have to shift
+ // it left here, because exponent >= 63 >= kMantissaBits.
+ __ Sub(exponent, exponent,
+ HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
+ __ Lsl(result, mantissa, exponent);
+
+ __ Bind(&done);
+ if (!skip_fastpath()) {
+ __ Pop(double_scratch);
+ }
+ __ Pop(scratch2, scratch1);
+ __ Ret();
+}
+
+
+// See call site for description.
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch,
+ FPRegister double_scratch,
+ Label* slow,
+ Condition cond) {
+ ASSERT(!AreAliased(left, right, scratch));
+ Label not_identical, return_equal, heap_number;
+ Register result = x0;
+
+ __ Cmp(right, left);
+ __ B(ne, &not_identical);
+
+ // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+  // They are both equal and they are not both Smis, so neither of them is a
+  // Smi. If it's not a heap number, then return equal.
+ if ((cond == lt) || (cond == gt)) {
+ __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
+ ge);
+ } else {
+ Register right_type = scratch;
+ __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
+ &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if ((cond == le) || (cond == ge)) {
+ __ Cmp(right_type, ODDBALL_TYPE);
+ __ B(ne, &return_equal);
+ __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ Mov(result, GREATER);
+ } else {
+ // undefined >= undefined should fail.
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+ }
+ }
+
+ __ Bind(&return_equal);
+ if (cond == lt) {
+ __ Mov(result, GREATER); // Things aren't less than themselves.
+ } else if (cond == gt) {
+ __ Mov(result, LESS); // Things aren't greater than themselves.
+ } else {
+ __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ // Cases lt and gt have been handled earlier, and case ne is never seen, as
+ // it is handled in the parser (see Parser::ParseBinaryExpression). We are
+ // only concerned with cases ge, le and eq here.
+ if ((cond != lt) && (cond != gt)) {
+ ASSERT((cond == ge) || (cond == le) || (cond == eq));
+ __ Bind(&heap_number);
+ // Left and right are identical pointers to a heap number object. Return
+ // non-equal if the heap number is a NaN, and equal otherwise. Comparing
+ // the number to itself will set the overflow flag iff the number is NaN.
+ __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch, double_scratch);
+ __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
+
+ if (cond == le) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+ }
+
+ // No fall through here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&not_identical);
+}
+
+
+// See call site for description.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_type,
+ Register right_type,
+ Register scratch) {
+ ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+
+ if (masm->emit_debug_code()) {
+ // We assume that the arguments are not identical.
+ __ Cmp(left, right);
+ __ Assert(ne, kExpectedNonIdenticalObjects);
+ }
+
+ // If either operand is a JS object or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ Label right_non_object;
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, &right_non_object);
+
+ // Return non-zero - x0 already contains a non-zero pointer.
+ ASSERT(left.is(x0) || right.is(x0));
+ Label return_not_equal;
+ __ Bind(&return_not_equal);
+ __ Ret();
+
+ __ Bind(&right_non_object);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Cmp(right_type, ODDBALL_TYPE);
+
+ // If right is not ODDBALL, test left. Otherwise, set eq condition.
+ __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
+
+ // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // Otherwise, right or left is ODDBALL, so set a ge condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+
+ __ B(ge, &return_not_equal);
+
+ // Internalized strings are unique, so they can only be equal if they are the
+ // same object. We have already tested that case, so if left and right are
+ // both internalized strings, they cannot be equal.
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(scratch, left_type, right_type);
+ __ TestAndBranchIfAllClear(
+ scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
+}
+
+
+// See call site for description.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register left,
+ Register right,
+ FPRegister left_d,
+ FPRegister right_d,
+ Register scratch,
+ Label* slow,
+ bool strict) {
+ ASSERT(!AreAliased(left, right, scratch));
+ ASSERT(!AreAliased(left_d, right_d));
+ ASSERT((left.is(x0) && right.is(x1)) ||
+ (right.is(x0) && left.is(x1)));
+ Register result = x0;
+
+ Label right_is_smi, done;
+ __ JumpIfSmi(right, &right_is_smi);
+
+ // Left is the smi. Check whether right is a heap number.
+ if (strict) {
+ // If right is not a number and left is a smi, then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!right.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Left is the smi. Right is a heap number. Load right value into right_d, and
+ // convert left smi into double in left_d.
+ __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(left_d, left);
+ __ B(&done);
+
+ __ Bind(&right_is_smi);
+ // Right is a smi. Check whether the non-smi left is a heap number.
+ if (strict) {
+ // If left is not a number and right is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ Label is_heap_number;
+ __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
+ &is_heap_number);
+ // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
+ if (!left.is(result)) {
+ __ Mov(result, NOT_EQUAL);
+ }
+ __ Ret();
+ __ Bind(&is_heap_number);
+ } else {
+ // Smi compared non-strictly with a non-smi, non-heap-number. Call the
+ // runtime.
+ __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+ }
+
+ // Right is the smi. Left is a heap number. Load left value into left_d, and
+ // convert right smi into double in right_d.
+ __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(right_d, right);
+
+ // Fall through to both_loaded_as_doubles.
+ __ Bind(&done);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+// See call site for description.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register left_map,
+ Register right_map,
+ Register left_type,
+ Register right_type,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+ Register result = x0;
+
+ Label object_test;
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // TODO(all): reexamine this branch sequence for optimisation wrt branch
+ // prediction.
+ __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
+ __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
+
+ // Both are internalized. We already checked that they weren't the same
+ // pointer, so they are not equal.
+ __ Mov(result, NOT_EQUAL);
+ __ Ret();
+
+ __ Bind(&object_test);
+
+ __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+
+ // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
+ // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+
+ __ B(lt, not_both_strings);
+
+ // If both objects are undetectable, they are equal. Otherwise, they are not
+ // equal, since they are different objects and an object is not equal to
+ // undefined.
+
+ // Returning here, so we can corrupt right_type and left_type.
+ Register right_bitfield = right_type;
+ Register left_bitfield = left_type;
+ __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
+ __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
+ __ And(result, right_bitfield, left_bitfield);
+ __ And(result, result, 1 << Map::kIsUndetectable);
+ __ Eor(result, result, 1 << Map::kIsUndetectable);
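+  // At this point result is 0 (EQUAL) only if both maps had kIsUndetectable
+  // set; otherwise the Eor leaves that bit set, producing a non-zero
+  // (not-equal) result.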
+ __ Ret();
+}
+
+
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ Bind(&ok);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = x1;
+ Register rhs = x0;
+ Register result = x0;
+ Condition cond = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+ Label not_two_smis, smi_done;
+ __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
+ __ SmiUntag(lhs);
+ __ Sub(result, lhs, Operand::UntagSmi(rhs));
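+  // The untagged difference is negative, zero or positive, directly encoding
+  // less-than, equal and greater-than for the smi fast case.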
+ __ Ret();
+
+ __ Bind(&not_two_smis);
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so it is
+ // certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
+
+ // If either is a smi (we know that at least one is not a smi), then they can
+ // only be strictly equal if the other is a HeapNumber.
+ __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
+
+ // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
+ // can:
+ // 1) Return the answer.
+ // 2) Branch to the slow case.
+ // 3) Fall through to both_loaded_as_doubles.
+ // In case 3, we have found out that we were dealing with a number-number
+ // comparison. The double values of the numbers have been loaded, right into
+ // rhs_d, left into lhs_d.
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+ EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+
+ __ Bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in rhs_d and
+ // lhs_d.
+ Label nan;
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &nan); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&nan);
+ // Left and/or right is a NaN. Load the result register with whatever makes
+ // the comparison fail, since comparisons with NaN always fail (except ne,
+ // which is filtered out at a higher level.)
+ ASSERT(cond != ne);
+ if ((cond == lt) || (cond == le)) {
+ __ Mov(result, GREATER);
+ } else {
+ __ Mov(result, LESS);
+ }
+ __ Ret();
+
+ __ Bind(&not_smis);
+ // At this point we know we are dealing with two different objects, and
+ // neither of them is a smi. The objects are in rhs_ and lhs_.
+
+ // Load the maps and types of the objects.
+ Register rhs_map = x10;
+ Register rhs_type = x11;
+ Register lhs_map = x12;
+ Register lhs_type = x13;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+
+ if (strict()) {
+ // This emits a non-equal return sequence for some object types, or falls
+ // through if it was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
+ }
+
+ Label check_for_internalized_strings;
+ Label flat_string_check;
+ // Check for heap number comparison. Branch to earlier double comparison code
+ // if they are heap numbers, otherwise, branch to internalized string check.
+ __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
+ __ B(ne, &check_for_internalized_strings);
+ __ Cmp(lhs_map, rhs_map);
+
+ // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
+ // string check.
+ __ B(ne, &flat_string_check);
+
+ // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
+ // comparison code.
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ B(&both_loaded_as_doubles);
+
+ __ Bind(&check_for_internalized_strings);
+ // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
+ // of internalized strings.
+ if ((cond == eq) && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
+    // Otherwise branches to the string case or to the not-both-strings case.
+ EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
+ lhs_type, rhs_type,
+ &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ Bind(&flat_string_check);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
+ x15, &slow);
+
+ Isolate* isolate = masm->isolate();
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
+ x11);
+ if (cond == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
+ x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
+ x10, x11, x12, x13);
+ }
+
+ // Never fall through to here.
+ if (FLAG_debug_code) {
+ __ Unreachable();
+ }
+
+ __ Bind(&slow);
+
+ __ Push(lhs, rhs);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ ASSERT((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
+ // ip0 and ip1 are corrupted by the call into C.
+ CPURegList saved_regs = kCallerSaved;
+ saved_regs.Remove(ip0);
+ saved_regs.Remove(ip1);
+ saved_regs.Remove(x8);
+ saved_regs.Remove(x9);
+
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ PushCPURegList(saved_regs);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushCPURegList(kCallerSavedFP);
+ }
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(x0, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ 1, 0);
+
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopCPURegList(kCallerSavedFP);
+ }
+ __ PopCPURegList(saved_regs);
+ __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ StoreBufferOverflowStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
+ Register return_address = temps.AcquireX();
+ __ Mov(return_address, lr);
+ // Restore lr with the value it had before the call to this stub (the value
+ // which must be pushed).
+ __ Mov(lr, saved_lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PushSafepointRegistersAndDoubles();
+ } else {
+ __ PushSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ UseScratchRegisterScope temps(masm);
+ Register return_address = temps.AcquireX();
+ // Preserve the return address (lr will be clobbered by the pop).
+ __ Mov(return_address, lr);
+ if (save_doubles_ == kSaveFPRegs) {
+ __ PopSafepointRegistersAndDoubles();
+ } else {
+ __ PopSafepointRegisters();
+ }
+ __ Ret(return_address);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: Exponent (as a tagged value).
+ // jssp[1]: Base (as a tagged value).
+ //
+ // The (tagged) result will be returned in x0, as a heap number.
+
+ Register result_tagged = x0;
+ Register base_tagged = x10;
+ Register exponent_tagged = x11;
+ Register exponent_integer = x12;
+ Register scratch1 = x14;
+ Register scratch0 = x15;
+ Register saved_lr = x19;
+ FPRegister result_double = d0;
+ FPRegister base_double = d0;
+ FPRegister exponent_double = d1;
+ FPRegister base_double_copy = d2;
+ FPRegister scratch1_double = d6;
+ FPRegister scratch0_double = d7;
+
+ // A fast-path for integer exponents.
+ Label exponent_is_smi, exponent_is_integer;
+ // Bail out to runtime.
+ Label call_runtime;
+ // Allocate a heap number for the result, and return it.
+ Label done;
+
+ // Unpack the inputs.
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi;
+ Label unpack_exponent;
+
+ __ Pop(exponent_tagged, base_tagged);
+
+ __ JumpIfSmi(base_tagged, &base_is_smi);
+ __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
+ // base_tagged is a heap number, so load its double value.
+ __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
+ __ B(&unpack_exponent);
+ __ Bind(&base_is_smi);
+ // base_tagged is a SMI, so untag it and convert it to a double.
+ __ SmiUntagToDouble(base_double, base_tagged);
+
+ __ Bind(&unpack_exponent);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
+ // exponent_tagged is a heap number, so load its double value.
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
+ __ Ldr(exponent_double,
+ FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
+ }
+
+ // Handle double (heap number) exponents.
+ if (exponent_type_ != INTEGER) {
+ // Detect integer exponents stored as doubles and handle those in the
+ // integer fast-path.
+ __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
+
+ if (exponent_type_ == ON_STACK) {
+ FPRegister half_double = d3;
+ FPRegister minus_half_double = d4;
+ // Detect square root case. Crankshaft detects constant +/-0.5 at compile
+ // time and uses DoMathPowHalf instead. We then skip this check for
+ // non-constant cases of +/-0.5 as these hardly occur.
+
+ __ Fmov(minus_half_double, -0.5);
+ __ Fmov(half_double, 0.5);
+ __ Fcmp(minus_half_double, exponent_double);
+ __ Fccmp(half_double, exponent_double, NZFlag, ne);
+ // Condition flags at this point:
+      //  0.5: nZCv // Identified by eq && pl
+ // -0.5: NZcv // Identified by eq && mi
+ // other: ?z?? // Identified by ne
+ __ B(ne, &call_runtime);
+
+ // The exponent is 0.5 or -0.5.
+
+ // Given that exponent is known to be either 0.5 or -0.5, the following
+ // special cases could apply (according to ECMA-262 15.8.2.13):
+ //
+ // base.isNaN(): The result is NaN.
+ // (base == +INFINITY) || (base == -INFINITY)
+ // exponent == 0.5: The result is +INFINITY.
+ // exponent == -0.5: The result is +0.
+ // (base == +0) || (base == -0)
+ // exponent == 0.5: The result is +0.
+ // exponent == -0.5: The result is +INFINITY.
+ // (base < 0) && base.isFinite(): The result is NaN.
+ //
+ // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
+ // where base is -INFINITY or -0.
+
+ // Add +0 to base. This has no effect other than turning -0 into +0.
+ __ Fadd(base_double, base_double, fp_zero);
+ // The operation -0+0 results in +0 in all cases except where the
+ // FPCR rounding mode is 'round towards minus infinity' (RM). The
+ // ARM64 simulator does not currently simulate FPCR (where the rounding
+ // mode is set), so test the operation with some debug code.
+ if (masm->emit_debug_code()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Fneg(scratch0_double, fp_zero);
+ // Verify that we correctly generated +0.0 and -0.0.
+ // bits(+0.0) = 0x0000000000000000
+ // bits(-0.0) = 0x8000000000000000
+ __ Fmov(temp, fp_zero);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
+ __ Fmov(temp, scratch0_double);
+ __ Eor(temp, temp, kDSignMask);
+ __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
+ // Check that -0.0 + 0.0 == +0.0.
+ __ Fadd(scratch0_double, scratch0_double, fp_zero);
+ __ Fmov(temp, scratch0_double);
+ __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
+ }
+
+ // If base is -INFINITY, make it +INFINITY.
+ // * Calculate base - base: All infinities will become NaNs since both
+ // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
+ // * If the result is NaN, calculate abs(base).
+ __ Fsub(scratch0_double, base_double, base_double);
+ __ Fcmp(scratch0_double, 0.0);
+ __ Fabs(scratch1_double, base_double);
+ __ Fcsel(base_double, scratch1_double, base_double, vs);
+
+ // Calculate the square root of base.
+ __ Fsqrt(result_double, base_double);
+ __ Fcmp(exponent_double, 0.0);
+ __ B(ge, &done); // Finish now for exponents of 0.5.
+ // Find the inverse for exponents of -0.5.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ __ B(&done);
+ }
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ B(&done);
+ }
+
+ // Handle SMI exponents.
+ __ Bind(&exponent_is_smi);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ SmiUntag(exponent_integer, exponent_tagged);
+ }
+
+ __ Bind(&exponent_is_integer);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // x12 exponent_integer The exponent as an integer.
+ // d1 base_double The base as a double.
+
+ // Find abs(exponent). For negative exponents, we can find the inverse later.
+ Register exponent_abs = x13;
+ __ Cmp(exponent_integer, 0);
+ __ Cneg(exponent_abs, exponent_integer, mi);
+ // x13 exponent_abs The value of abs(exponent_integer).
+
+ // Repeatedly multiply to calculate the power.
+ // result = 1.0;
+ // For each bit n (exponent_integer{n}) {
+ // if (exponent_integer{n}) {
+ // result *= base;
+ // }
+ // base *= base;
+ // if (remaining bits in exponent_integer are all zero) {
+ // break;
+ // }
+ // }
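+  // For example, with exponent_abs == 5 (binary 101), the loop multiplies
+  // result by base (bit 0), squares the running power to base^2 and then
+  // base^4, and multiplies result by base^4 (bit 2), leaving base^5 in
+  // result_double.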
+ Label power_loop, power_loop_entry, power_loop_exit;
+ __ Fmov(scratch1_double, base_double);
+ __ Fmov(base_double_copy, base_double);
+ __ Fmov(result_double, 1.0);
+ __ B(&power_loop_entry);
+
+ __ Bind(&power_loop);
+ __ Fmul(scratch1_double, scratch1_double, scratch1_double);
+ __ Lsr(exponent_abs, exponent_abs, 1);
+ __ Cbz(exponent_abs, &power_loop_exit);
+
+ __ Bind(&power_loop_entry);
+ __ Tbz(exponent_abs, 0, &power_loop);
+ __ Fmul(result_double, result_double, scratch1_double);
+ __ B(&power_loop);
+
+ __ Bind(&power_loop_exit);
+
+ // If the exponent was positive, result_double holds the result.
+ __ Tbz(exponent_integer, kXSignBit, &done);
+
+ // The exponent was negative, so find the inverse.
+ __ Fmov(scratch0_double, 1.0);
+ __ Fdiv(result_double, scratch0_double, result_double);
+ // ECMA-262 only requires Math.pow to return an 'implementation-dependent
+ // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
+ // to calculate the subnormal value 2^-1074. This method of calculating
+ // negative powers doesn't work because 2^1074 overflows to infinity. To
+ // catch this corner-case, we bail out if the result was 0. (This can only
+ // occur if the divisor is infinity or the base is zero.)
+ __ Fcmp(result_double, 0.0);
+ __ B(&done, ne);
+
+ if (exponent_type_ == ON_STACK) {
+ // Bail out to runtime code.
+ __ Bind(&call_runtime);
+ // Put the arguments back on the stack.
+ __ Push(base_tagged, exponent_tagged);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // Return.
+ __ Bind(&done);
+ __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
+ __ Str(result_double,
+ FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
+ ASSERT(result_tagged.is(x0));
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ } else {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ Fmov(base_double, base_double_copy);
+ __ Scvtf(exponent_double, exponent_integer);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ Bind(&done);
+ __ IncrementCounter(
+ masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
+ __ Ret();
+ }
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ // It is important that the following stubs are generated in this order
+ // because pregenerated stubs can only call other pregenerated stubs.
+ // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
+ // CEntryStub.
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+ ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+ BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ StoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ StoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+ RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+ stub1.GetCode(isolate);
+ RestoreRegistersStateStub stub2(kSaveFPRegs);
+ stub2.GetCode(isolate);
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ // Floating-point code doesn't get special handling in ARM64, so there's
+ // nothing to do here.
+ USE(isolate);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ // CEntryStub stores the return address on the stack before calling into
+ // C++ code. In some cases, the VM accesses this address, but it is not used
+ // when the C++ code returns to the stub because LR holds the return address
+ // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
+ // returning to dead code.
+ // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
+ // find any comment to confirm this, and I don't hit any crashes whatever
+  // this function returns. The analysis should be properly confirmed.
+ return true;
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ stub.GetCode(isolate);
+ CEntryStub stub_fp(1, kSaveFPRegs);
+ stub_fp.GetCode(isolate);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal,
+ Label* throw_termination,
+ bool do_gc,
+ bool always_allocate) {
+ // x0 : Result parameter for PerformGC, if do_gc is true.
+ // x21 : argv
+ // x22 : argc
+ // x23 : target
+ //
+ // The stack (on entry) holds the arguments and the receiver, with the
+ // receiver at the highest address:
+ //
+ // argv[8]: receiver
+ // argv -> argv[0]: arg[argc-2]
+ // ... ...
+ // argv[...]: arg[1]
+ // argv[...]: arg[0]
+ //
+ // Immediately below (after) this is the exit frame, as constructed by
+ // EnterExitFrame:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // csp[...]: Saved doubles, if saved_doubles is true.
+ // csp[32]: Alignment padding, if necessary.
+ // csp[24]: Preserved x23 (used for target).
+ // csp[16]: Preserved x22 (used for argc).
+ // csp[8]: Preserved x21 (used for argv).
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // After a successful call, the exit frame, preserved registers (x21-x23) and
+ // the arguments (including the receiver) are dropped or popped as
+ // appropriate. The stub then returns.
+ //
+ // After an unsuccessful call, the exit frame and suchlike are left
+ // untouched, and the stub either throws an exception by jumping to one of
+ // the provided throw_ labels, or it falls through. The failure details are
+ // passed through in x0.
+ ASSERT(csp.Is(__ StackPointer()));
+
+ Isolate* isolate = masm->isolate();
+
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ if (do_gc) {
+ // Call Runtime::PerformGC, passing x0 (the result parameter for
+ // PerformGC) and x1 (the isolate).
+ __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(isolate), 2, 0);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // Prepare AAPCS64 arguments to pass to the builtin.
+ __ Mov(x0, argc);
+ __ Mov(x1, argv);
+ __ Mov(x2, ExternalReference::isolate_address(isolate));
+
+ // Store the return address on the stack, in the space previously allocated
+ // by EnterExitFrame. The return address is queried by
+ // ExitFrame::GetStateForFramePointer.
+ Label return_location;
+ __ Adr(x12, &return_location);
+ __ Poke(x12, 0);
+ if (__ emit_debug_code()) {
+    // Verify that the slot below fp[kSPOffset] (that is, fp[kSPOffset] - 8)
+    // holds the return location (currently in x12).
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
+ __ Cmp(temp, x12);
+ __ Check(eq, kReturnAddressNotFoundInFrame);
+ }
+
+ // Call the builtin.
+ __ Blr(target);
+ __ Bind(&return_location);
+ const Register& result = x0;
+
+ if (always_allocate) {
+ __ Mov(x10, Operand(scope_depth));
+ __ Ldr(x11, MemOperand(x10));
+ __ Sub(x11, x11, 1);
+ __ Str(x11, MemOperand(x10));
+ }
+
+ // x0 result The return code from the call.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ //
+ // If all of the result bits matching kFailureTagMask are '1', the result is
+ // a failure. Otherwise, it's an ordinary tagged object and the call was a
+ // success.
+ Label failure;
+ __ And(x10, result, kFailureTagMask);
+ __ Cmp(x10, kFailureTagMask);
+ __ B(&failure, eq);
+
+ // The call succeeded, so unwind the stack and return.
+
+ // Restore callee-saved registers x21-x23.
+ __ Mov(x11, argc);
+
+ __ Peek(argv, 1 * kPointerSize);
+ __ Peek(argc, 2 * kPointerSize);
+ __ Peek(target, 3 * kPointerSize);
+
+ __ LeaveExitFrame(save_doubles_, x10, true);
+ ASSERT(jssp.Is(__ StackPointer()));
+ // Pop or drop the remaining stack slots and return from the stub.
+ // jssp[24]: Arguments array (of size argc), including receiver.
+ // jssp[16]: Preserved x23 (used for target).
+ // jssp[8]: Preserved x22 (used for argc).
+ // jssp[0]: Preserved x21 (used for argv).
+ __ Drop(x11);
+ __ Ret();
+
+ // The stack pointer is still csp if we aren't returning, and the frame
+ // hasn't changed (except for the return address).
+ __ SetStackPointer(csp);
+
+ __ Bind(&failure);
+ // The call failed, so check if we need to throw an exception, and fall
+ // through (to retry) otherwise.
+
+ Label retry;
+ // x0 result The return code from the call, including the failure
+ // code and details.
+ // x21 argv
+ // x22 argc
+ // x23 target
+ // Refer to the Failure class for details of the bit layout.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
+ __ B(eq, &retry); // RETRY_AFTER_GC
+
+ // Retrieve the pending exception.
+ const Register& exception = result;
+ const Register& exception_address = x11;
+ __ Mov(exception_address,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception, MemOperand(exception_address));
+
+ // Clear the pending exception.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Str(x10, MemOperand(exception_address));
+
+ // x0 exception The exception descriptor.
+ // x21 argv
+ // x22 argc
+ // x23 target
+
+ // Special handling of termination exceptions, which are uncatchable by
+ // JavaScript code.
+ __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
+ __ B(eq, throw_termination);
+
+ // Handle normal exception.
+ __ B(throw_normal);
+
+ __ Bind(&retry);
+ // The result (x0) is passed through as the next PerformGC parameter.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // The Abort mechanism relies on CallRuntime, which in turn relies on
+ // CEntryStub, so until this stub has been generated, we have to use a
+ // fall-back Abort mechanism.
+ //
+ // Note that this stub must be generated before any use of Abort.
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ ASM_LOCATION("CEntryStub::Generate entry");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Register parameters:
+ // x0: argc (including receiver, untagged)
+ // x1: target
+ //
+ // The stack on entry holds the arguments and the receiver, with the receiver
+ // at the highest address:
+ //
+  // jssp[argc-1]: receiver
+ // jssp[argc-2]: arg[argc-2]
+ // ... ...
+ // jssp[1]: arg[1]
+ // jssp[0]: arg[0]
+ //
+ // The arguments are in reverse order, so that arg[argc-2] is actually the
+ // first argument to the target function and arg[0] is the last.
+ ASSERT(jssp.Is(__ StackPointer()));
+ const Register& argc_input = x0;
+ const Register& target_input = x1;
+
+ // Calculate argv, argc and the target address, and store them in
+ // callee-saved registers so we can retry the call without having to reload
+ // these arguments.
+ // TODO(jbramley): If the first call attempt succeeds in the common case (as
+ // it should), then we might be better off putting these parameters directly
+ // into their argument registers, rather than using callee-saved registers and
+ // preserving them on the stack.
+ const Register& argv = x21;
+ const Register& argc = x22;
+ const Register& target = x23;
+
+ // Derive argv from the stack pointer so that it points to the first argument
+ // (arg[argc-2]), or just below the receiver in case there are no arguments.
+ // - Adjust for the arg[] array.
+ Register temp_argv = x11;
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+
+ // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
+ // registers.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(save_doubles_, x10, 3);
+ ASSERT(csp.Is(__ StackPointer()));
+
+ // Poke callee-saved registers into reserved space.
+ __ Poke(argv, 1 * kPointerSize);
+ __ Poke(argc, 2 * kPointerSize);
+ __ Poke(target, 3 * kPointerSize);
+
+ // We normally only keep tagged values in callee-saved registers, as they
+ // could be pushed onto the stack by called stubs and functions, and on the
+ // stack they can confuse the GC. However, we're only calling C functions
+ // which can push arbitrary data onto the stack anyway, and so the GC won't
+ // examine that part of the stack.
+ __ Mov(argc, argc_input);
+ __ Mov(target, target_input);
+ __ Mov(argv, temp_argv);
+
+ Label throw_normal;
+ Label throw_termination;
+
+ // Call the runtime function.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ false,
+ false);
+
+ // If successful, the previous GenerateCore will have returned to the
+ // calling code. Otherwise, we fall through into the following.
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
+ GenerateCore(masm,
+ &throw_normal,
+ &throw_termination,
+ true,
+ true);
+
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
+
+ // We didn't execute a return case, so the stack frame hasn't been updated
+ // (except for the return address slot). However, we don't need to initialize
+ // jssp because the throw method will immediately overwrite it when it
+ // unwinds the stack.
+ __ SetStackPointer(jssp);
+
+ // Throw exceptions.
+ // If we throw an exception, we can end up re-entering CEntryStub before we
+ // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
+ // here.
+
+ __ Bind(&throw_termination);
+ ASM_LOCATION("Throw termination");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ ThrowUncatchable(x0, x10, x11, x12, x13);
+
+ __ Bind(&throw_normal);
+ ASM_LOCATION("Throw normal");
+ __ Mov(argv, 0);
+ __ Mov(argc, 0);
+ __ Mov(target, 0);
+ __ Throw(x0, x10, x11, x12, x13);
+}
+
+
+// This is the entry point from C++. 5 arguments are provided in x0-x4.
+// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// Input:
+// x0: code entry.
+// x1: function.
+// x2: receiver.
+// x3: argc.
+// x4: argv.
+// Output:
+// x0: result.
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Register code_entry = x0;
+
+ // Enable instruction instrumentation. This only works on the simulator, and
+ // will have no effect on the model or real hardware.
+ __ EnableInstrumentation();
+
+ Label invoke, handler_entry, exit;
+
+ // Push callee-saved registers and synchronize the system stack pointer (csp)
+ // and the JavaScript stack pointer (jssp).
+ //
+ // We must not write to jssp until after the PushCalleeSavedRegisters()
+ // call, since jssp is itself a callee-saved register.
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+ // Set up the reserved register for 0.0.
+ __ Fmov(fp_zero, 0.0);
+
+ // Build an entry frame (see layout below).
+ Isolate* isolate = masm->isolate();
+
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
+ __ Mov(x13, bad_frame_pointer);
+ __ Mov(x12, Smi::FromInt(marker));
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Ldr(x10, MemOperand(x11));
+
+ __ Push(x13, xzr, x12, x10);
+ // Set up fp.
+ __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+
+ // Push the JS entry frame marker. Also set js_entry_sp if this is the
+ // outermost JS call.
+ Label non_outermost_js, done;
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+ __ Mov(x10, ExternalReference(js_entry_sp));
+ __ Ldr(x11, MemOperand(x10));
+ __ Cbnz(x11, &non_outermost_js);
+ __ Str(fp, MemOperand(x10));
+ __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Push(x12);
+ __ B(&done);
+ __ Bind(&non_outermost_js);
+ // We spare one instruction by pushing xzr since the marker is 0.
+ ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+ __ Push(xzr);
+ __ Bind(&done);
+
+ // The frame set up looks like this:
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+  // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ B(&invoke);
+
+ // Prevent the constant pool from being emitted between the record of the
+ // handler_entry position and the first instruction of the sequence here.
+ // There is no risk because Assembler::Emit() emits the instruction before
+ // checking for constant pool emission, but we do not want to depend on
+ // that.
+ {
+ Assembler::BlockPoolsScope block_pools(masm);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ }
+ __ Str(code_entry, MemOperand(x10));
+ __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
+ __ B(&exit);
+
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
+ __ Bind(&invoke);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the B(&invoke) above, which
+ // restores all callee-saved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Str(x10, MemOperand(x11));
+
+ // Invoke the function by calling through the JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // x0: code entry.
+ // x1: function.
+ // x2: receiver.
+ // x3: argc.
+ // x4: argv.
+ ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
+ : Builtins::kJSEntryTrampoline,
+ isolate);
+ __ Mov(x10, entry);
+
+ // Call the JSEntryTrampoline.
+ __ Ldr(x11, MemOperand(x10)); // Dereference the address.
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Blr(x12);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+
+ __ Bind(&exit);
+ // x0 holds the result.
+ // The stack pointer points to the top of the entry frame pushed on entry from
+ // C++ (at the beginning of this stub):
+ // jssp[0] : JS entry frame marker.
+ // jssp[1] : C entry FP.
+ // jssp[2] : stack frame marker.
+  // jssp[3] : stack frame marker.
+ // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(x10);
+ __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ B(ne, &non_outermost_js_2);
+ __ Mov(x11, ExternalReference(js_entry_sp));
+ __ Str(xzr, MemOperand(x11));
+ __ Bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(x10);
+ __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
+ __ Str(x10, MemOperand(x11));
+
+ // Reset the stack to the callee saved registers.
+ __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+ // Restore the callee-saved registers and return.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+ // After this point, we must not modify jssp because it is a callee-saved
+ // register which we have just restored.
+ __ Ret();
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x1 : receiver
+ // -- x0 : key
+ // -----------------------------------
+ Register key = x0;
+ receiver = x1;
+ __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
+ __ B(ne, &miss);
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x2 : name
+ // -- x0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = x0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+
+ __ Bind(&miss);
+ StubCompiler::TailCallBuiltin(masm,
+ BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Stack on entry:
+ // jssp[0]: function.
+ // jssp[8]: object.
+ //
+ // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
+ // instanceof.
+
+ Register result = x0;
+ Register function = right();
+ Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+ // Delta for the instructions generated between the inline map check and the
+ // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
+ Label not_js_object, slow;
+
+ if (!HasArgsInRegisters()) {
+ __ Pop(function, object);
+ }
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive, but correct.
+ __ Mov(res_true, Smi::FromInt(0));
+ __ Mov(res_false, Smi::FromInt(1));
+ }
+
+ // Check that the left hand side is a JS object and load its map as a side
+ // effect.
+ Register map = x12;
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
+
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
+ __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
+ __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
+ __ Bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ Register prototype = x13;
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
+ MacroAssembler::kMissOnBoundFunction);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ }
+
+ Label return_true, return_result;
+ {
+ // Loop through the prototype chain looking for the function prototype.
+ Register chain_map = x1;
+ Register chain_prototype = x14;
+ Register null_value = x15;
+ Label loop;
+ __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ // Speculatively set a result.
+ __ Mov(result, res_false);
+
+ __ Bind(&loop);
+
+ // If the chain prototype is the object prototype, return true.
+ __ Cmp(chain_prototype, prototype);
+ __ B(eq, &return_true);
+
+ // If the chain prototype is null, we've reached the end of the chain, so
+ // return false.
+ __ Cmp(chain_prototype, null_value);
+ __ B(eq, &return_result);
+
+ // Otherwise, load the next prototype in the chain, and loop.
+ __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
+ __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
+ __ B(&loop);
+ }
+
+ // Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
+ __ Bind(&return_result);
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+ }
+ __ Ret();
+
+ Label object_not_null, object_not_null_or_smi;
+
+ __ Bind(&not_js_object);
+ Register object_type = x14;
+ // x0 result result return register (uninit)
+ // x10 function pointer to function
+ // x11 object pointer to object
+ // x14 object_type type of object (uninit)
+
+ // Before null, smi and string checks, check that the rhs is a function.
+ // For a non-function rhs, an exception must be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
+
+ // Null is not instance of anything.
+ __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
+ __ B(ne, &object_not_null);
+ __ Ret();
+
+ __ Bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ Ret();
+
+ __ Bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch2, &slow);
+ __ Ret();
+
+ // Slow-case. Tail call builtin.
+ __ Bind(&slow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments have either been passed into registers or have been previously
+ // popped. We need to push them before calling builtin.
+ __ Push(object, function);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
+ if (ReturnTrueFalseObject()) {
+ // Reload true/false because they were clobbered in the builtin call.
+ __ LoadTrueFalseRoots(res_true, res_false);
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
+ }
+ __ Ret();
+}
+
+
+Register InstanceofStub::left() {
+ // Object to check (instanceof lhs).
+ return x11;
+}
+
+
+Register InstanceofStub::right() {
+ // Constructor function (instanceof rhs).
+ return x10;
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ Register arg_count = x0;
+ Register key = x1;
+
+ // The displacement is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(key, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register local_fp = x11;
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label skip_adaptor;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Csel(local_fp, fp, caller_fp, ne);
+ __ B(ne, &skip_adaptor);
+
+ // Load the actual arguments limit found in the arguments adaptor frame.
+ __ Ldr(arg_count, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Bind(&skip_adaptor);
+
+ // Check index against formal parameters count limit. Use unsigned comparison
+ // to get negative check for free: branch if key < 0 or key >= arg_count.
+ __ Cmp(key, arg_count);
+ __ B(hs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ Sub(x10, arg_count, key);
+ __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
+ __ Ldr(x0, MemOperand(x10, kDisplacement));
+ __ Ret();
+
+ // Slow case: handle non-smi or out-of-bounds access to arguments by calling
+ // the runtime system.
+ __ Bind(&slow);
+ __ Push(key);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Register caller_fp = x10;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Load and untag the context.
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
+ __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
+ __ B(ne, &runtime);
+
+ // Patch the arguments.length and parameters pointer in the current frame.
+ __ Ldr(x11, MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Poke(x11, 0 * kXRegSize);
+ __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
+ __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
+ __ Poke(x10, 1 * kXRegSize);
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register arg_count_smi = x3;
+ Register param_count_smi = x3;
+ Register param_count = x7;
+ Register recv_arg = x14;
+ Register function = x4;
+ __ Pop(param_count_smi, recv_arg, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 mapped_params number of mapped params, min(params, args) (uninit)
+ // x2 arg_count number of function arguments (uninit)
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 recv_arg pointer to receiver arguments
+
+ Register arg_count = x2;
+ __ Mov(arg_count, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ Register mapped_params = x1;
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has two extra words containing context and backing
+ // store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size,
+ FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x7 param_count number of function parameters
+ // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x11;
+ Register aliased_args_offset = x10;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Ldr(aliased_args_offset,
+ ContextMemOperand(global_ctx,
+ Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+ __ Cmp(mapped_params, 0);
+ __ CmovX(args_offset, aliased_args_offset, ne);
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
+
+ // Use the length and set that as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there, otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 recv_arg pointer to receiver arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
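+  // The stored length covers the two extra header slots (the context and the
+  // pointer to the backing store) as well as the mapped parameters.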
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
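+  // For example (illustrative values, not tied to any particular caller),
+  // with param_count == 4 and mapped_params == 2, the two mapped entries
+  // receive context indices MIN_CONTEXT_SLOTS + 3 and MIN_CONTEXT_SLOTS + 2.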
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x3 arg_count_smi number of function arguments (smi)
+ // x4 function function pointer
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Smi::FromInt(1));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
+ __ Bind(&skip_parameter_map);
+  // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 mapped_params number of mapped parameters, min(params, args)
+ // x2 arg_count number of function arguments
+ // x4 function function pointer
+ // x3 arg_count_smi number of function arguments (smi)
+ // x6 backing_store pointer to backing store (uninit)
+ // x14 recv_arg pointer to receiver arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // jssp[0]: number of parameters (tagged)
+ // jssp[8]: address of receiver argument
+ // jssp[16]: function
+ //
+ // Returns pointer to result object in x0.
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register param_count_smi = x1;
+ Register params = x2;
+ Register function = x3;
+ Register param_count = x13;
+ __ Pop(param_count_smi, params, function);
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label try_allocate, runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &try_allocate);
+
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Try the new space allocation. Start out with computing the size of the
+ // arguments object and the elements array in words.
+ Register size = x10;
+ __ Bind(&try_allocate);
+ __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
+ __ Cmp(param_count, 0);
+ __ CzeroX(size, eq);
+ __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
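+  // For example (illustrative values only), with param_count == 2 the
+  // requested size is 2 + FixedArray::kHeaderSize / kPointerSize +
+  // Heap::kStrictArgumentsObjectSize / kPointerSize words; with
+  // param_count == 0 only the arguments object itself is allocated.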
+
+ // Do the allocation of both objects in one go. Assign this to x0, as it will
+ // be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (native) context.
+ Register global_object = x10;
+ Register global_ctx = x10;
+ Register args_offset = x4;
+ __ Ldr(global_object, GlobalObjectMemOperand());
+ __ Ldr(global_ctx, FieldMemOperand(global_object,
+ GlobalObject::kNativeContextOffset));
+ __ Ldr(args_offset,
+ ContextMemOperand(global_ctx,
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 args_offset offset to arguments boilerplate
+ // x13 param_count number of parameters passed to function
+
+ // Copy the JS object part.
+ __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
+ JSObject::kHeaderSize / kPointerSize);
+
+ // Set the smi-tagged length as an in-object property.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ Cbz(param_count, &done);
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ Register elements = x5;
+ __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects: parameter array and
+ // arguments object
+ // x1 param_count_smi number of parameters passed to function (smi)
+ // x2 params pointer to parameters
+ // x3 function function pointer
+ // x4 array pointer to array slot (uninit)
+ // x5 elements pointer to elements array of alloc_obj
+ // x13 param_count number of parameters passed to function
+
+ // Copy the fixed array slots.
+ Label loop;
+ Register array = x4;
+ // Set up pointer to first array slot.
+ __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&loop);
+  // Pre-decrement the parameters pointer by kPointerSize on each iteration,
+  // so that the receiver is skipped.
+ __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
+ // Post-increment elements by kPointerSize on each iteration.
+ __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
+ __ Sub(param_count, param_count, 1);
+ __ Cbnz(param_count, &loop);
+
+ // Return from stub.
+ __ Bind(&done);
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, params, param_count_smi);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+
+ // Stack frame on entry.
+ // jssp[0]: last_match_info (expected JSArray)
+ // jssp[8]: previous index
+ // jssp[16]: subject string
+ // jssp[24]: JSRegExp object
+ Label runtime;
+
+ // Use of registers for this function.
+
+ // Variable registers:
+ // x10-x13 used as scratch registers
+ // w0 string_type type of subject string
+ // x2 jsstring_length subject string length
+ // x3 jsregexp_object JSRegExp object
+ // w4 string_encoding ASCII or UC16
+ // w5 sliced_string_offset if the string is a SlicedString
+ // offset to the underlying string
+ // w6 string_representation groups attributes of the string:
+ // - is a string
+ // - type of the string
+ // - is a short external string
+ Register string_type = w0;
+ Register jsstring_length = x2;
+ Register jsregexp_object = x3;
+ Register string_encoding = w4;
+ Register sliced_string_offset = w5;
+ Register string_representation = w6;
+
+ // These are in callee save registers and will be preserved by the call
+ // to the native RegExp code, as this code is called using the normal
+ // C calling convention. When calling directly from generated code the
+ // native RegExp code will not do a GC and therefore the content of
+ // these registers are safe to use after the call.
+
+ // x19 subject subject string
+ // x20 regexp_data RegExp data (FixedArray)
+ // x21 last_match_info_elements info relative to the last match
+ // (FixedArray)
+ // x22 code_object generated regexp code
+ Register subject = x19;
+ Register regexp_data = x20;
+ Register last_match_info_elements = x21;
+ Register code_object = x22;
+
+ // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
+ CPURegList used_callee_saved_registers(subject,
+ regexp_data,
+ last_match_info_elements,
+ code_object);
+ __ PushCPURegList(used_callee_saved_registers);
+
+ // Stack frame.
+ // jssp[0] : x19
+ // jssp[8] : x20
+ // jssp[16]: x21
+ // jssp[24]: x22
+ // jssp[32]: last_match_info (JSArray)
+ // jssp[40]: previous index
+ // jssp[48]: subject string
+ // jssp[56]: JSRegExp object
+
+ const int kLastMatchInfoOffset = 4 * kPointerSize;
+ const int kPreviousIndexOffset = 5 * kPointerSize;
+ const int kSubjectOffset = 6 * kPointerSize;
+ const int kJSRegExpOffset = 7 * kPointerSize;
+
+ // Ensure that a RegExp stack is allocated.
+ Isolate* isolate = masm->isolate();
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(isolate);
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
+ __ Mov(x10, address_of_regexp_stack_memory_size);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cbz(x10, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(jsregexp_object, kJSRegExpOffset);
+ __ JumpIfSmi(jsregexp_object, &runtime);
+ __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(regexp_data, kSmiTagMask);
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ }
+
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ B(ne, &runtime);
+
+  // Check that the number of captures fits in the static offsets vector
+  // buffer. We always have at least one capture for the whole match, plus
+  // additional ones due to capturing parentheses. A capture takes 2 registers.
+ // The number of capture registers then is (number_of_captures + 1) * 2.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // number_of_captures * 2 <= offsets vector size - 2
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ Add(x10, x10, x10);
+ __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
+ __ B(hi, &runtime);
+
+ // Initialize offset for possibly sliced string.
+ __ Mov(sliced_string_offset, 0);
+
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(subject, kSubjectOffset);
+ __ JumpIfSmi(subject, &runtime);
+
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+ __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label check_underlying; // (4)
+ Label seq_string; // (5)
+ Label not_seq_nor_cons; // (6)
+ Label external_string; // (7)
+ Label not_long_external; // (8)
+
+ // (1) Sequential string? If yes, go to (5).
+ __ And(string_representation,
+ string_type,
+ kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask);
+ // We depend on the fact that Strings of type
+ // SeqString and not ShortExternalString are defined
+ // by the following pattern:
+ // string_type: 0XX0 XX00
+ // ^ ^ ^^
+ // | | ||
+ // | | is a SeqString
+ // | is not a short external String
+ // is a String
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ Cbz(string_representation, &seq_string); // Go to (5).
+
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+ STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+ __ Cmp(string_representation, kExternalStringTag);
+ __ B(ge, &not_seq_nor_cons); // Go to (6).
+
+ // (3) Cons string. Check that it's flat.
+ __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
+ // Replace subject with first string.
+ __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+ // (4) Is subject external? If yes, go to (7).
+ __ Bind(&check_underlying);
+ // Reload the string type.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ TestAndBranchIfAnySet(string_type.X(),
+ kStringRepresentationMask,
+ &external_string); // Go to (7).
+
+ // (5) Sequential string. Load regexp code according to encoding.
+ __ Bind(&seq_string);
+
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kPreviousIndexOffset);
+ __ JumpIfNotSmi(x10, &runtime);
+ __ Cmp(jsstring_length, x10);
+ __ B(ls, &runtime);
+
+ // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
+ // before entering the exit frame.
+ __ SmiUntag(x1, x10);
+
+ // The third bit determines the string encoding in string_type.
+ STATIC_ASSERT(kOneByteStringTag == 0x04);
+ STATIC_ASSERT(kTwoByteStringTag == 0x00);
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+
+ // Find the code object based on the assumptions above.
+  // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
+  // of kPointerSize to reach the latter.
+ ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+ JSRegExp::kDataUC16CodeOffset);
+ __ Mov(x10, kPointerSize);
+ // We will need the encoding later: ASCII = 0x04
+ // UC16 = 0x00
+ __ Ands(string_encoding, string_type, kStringEncodingMask);
+ __ CzeroX(x10, ne);
+ __ Add(x10, regexp_data, x10);
+ __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+
+ // (E) Carry on. String handling is done.
+
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+ __ JumpIfSmi(code_object, &runtime);
+
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
+ x10,
+ x11);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ __ EnterExitFrame(false, x10, 1);
+ ASSERT(csp.Is(__ StackPointer()));
+
+  // We have 9 arguments to pass to the regexp code, so we pass one on the
+  // stack and the rest in registers.
+
+ // Note that the placement of the argument on the stack isn't standard
+ // AAPCS64:
+ // csp[0]: Space for the return address placed by DirectCEntryStub.
+ // csp[8]: Argument 9, the current isolate address.
+
+ __ Mov(x10, ExternalReference::isolate_address(isolate));
+ __ Poke(x10, kPointerSize);
+
+ Register length = w11;
+ Register previous_index_in_bytes = w12;
+ Register start = x13;
+
+ // Load start of the subject string.
+ __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
+ // Load the length from the original subject string from the previous stack
+ // frame. Therefore we have to use fp, which points exactly to two pointer
+ // sizes below the previous sp. (Because creating a new stack frame pushes
+ // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
+ __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+ __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
+
+ // Handle UC16 encoding, two bytes make one character.
+ // string_encoding: if ASCII: 0x04
+ // if UC16: 0x00
+ STATIC_ASSERT(kStringEncodingMask == 0x04);
+ __ Ubfx(string_encoding, string_encoding, 2, 1);
+ __ Eor(string_encoding, string_encoding, 1);
+ // string_encoding: if ASCII: 0
+ // if UC16: 1
+
+ // Convert string positions from characters to bytes.
+ // Previous index is in x1.
+ __ Lsl(previous_index_in_bytes, w1, string_encoding);
+ __ Lsl(length, length, string_encoding);
+ __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
+
+ // Argument 1 (x0): Subject string.
+ __ Mov(x0, subject);
+
+ // Argument 2 (x1): Previous index, already there.
+
+ // Argument 3 (x2): Get the start of input.
+ // Start of input = start of string + previous index + substring offset
+ // (0 if the string
+ // is not sliced).
+ __ Add(w10, previous_index_in_bytes, sliced_string_offset);
+ __ Add(x2, start, Operand(w10, UXTW));
+
+ // Argument 4 (x3):
+ // End of input = start of input + (length of input - previous index)
+ __ Sub(w10, length, previous_index_in_bytes);
+ __ Add(x3, x2, Operand(w10, UXTW));
+
+ // Argument 5 (x4): static offsets vector buffer.
+ __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate));
+
+ // Argument 6 (x5): Set the number of capture registers to zero to force
+ // global regexps to behave as non-global. This stub is not used for global
+ // regexps.
+ __ Mov(x5, 0);
+
+ // Argument 7 (x6): Start (high end) of backtracking stack memory area.
+ __ Mov(x10, address_of_regexp_stack_memory_address);
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, address_of_regexp_stack_memory_size);
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x6, x10, x11);
+
+ // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
+ __ Mov(x7, 1);
+
+ // Locate the code entry and call it.
+ __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, code_object);
+
+ __ LeaveExitFrame(false, x10, true);
+
+ // The generated regexp code returns an int32 in w0.
+ Label failure, exception;
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
+ __ CompareAndBranch(w0,
+ NativeRegExpMacroAssembler::EXCEPTION,
+ eq,
+ &exception);
+ __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
+
+ // Success: process the result from the native regexp code.
+ Register number_of_capture_registers = x12;
+
+ // Calculate number of capture registers (number_of_captures + 1) * 2
+ // and store it in the last match info.
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(regexp_data,
+ JSRegExp::kIrregexpCaptureCountOffset));
+ __ Add(x10, x10, x10);
+ __ Add(number_of_capture_registers, x10, 2);
+
+ // Check that the fourth object is a JSArray object.
+ ASSERT(jssp.Is(__ StackPointer()));
+ __ Peek(x10, kLastMatchInfoOffset);
+ __ JumpIfSmi(x10, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+
+ // Check that the JSArray is the fast case.
+ __ Ldr(last_match_info_elements,
+ FieldMemOperand(x10, JSArray::kElementsOffset));
+ __ Ldr(x10,
+ FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
+
+ // Check that the last match info has space for the capture registers and the
+ // additional information (overhead).
+ // (number_of_captures + 1) * 2 + overhead <= last match info size
+ // (number_of_captures * 2) + 2 + overhead <= last match info size
+ // number_of_capture_registers + overhead <= last match info size
+ __ Ldrsw(x10,
+ UntagSmiFieldMemOperand(last_match_info_elements,
+ FixedArray::kLengthOffset));
+ __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Cmp(x11, x10);
+ __ B(gt, &runtime);
+
+ // Store the capture count.
+ __ SmiTag(x10, number_of_capture_registers);
+ __ Str(x10,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ // Use x10 as the subject string in order to only need
+ // one RecordWriteStub.
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ Mov(x10, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ x10,
+ x11,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+
+ Register last_match_offsets = x13;
+ Register offsets_vector_index = x14;
+ Register current_offset = x15;
+
+ // Get the static offsets vector filled by the native regexp code
+ // and fill the last match info.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(isolate);
+ __ Mov(offsets_vector_index, address_of_static_offsets_vector);
+
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // iterates down to zero (inclusive).
+ __ Add(last_match_offsets,
+ last_match_info_elements,
+ RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Bind(&next_capture);
+ __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
+ __ B(mi, &done);
+  // Read two 32-bit values from the static offsets vector buffer into
+  // an X register.
+ __ Ldr(current_offset,
+ MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
+ // Store the smi values in the last match info.
+ __ SmiTag(x10, current_offset);
+ // Clearing the 32 bottom bits gives us a Smi.
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
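+  // x10 now holds the first offset (the low 32 bits of current_offset) as a
+  // smi, and x11 the second offset (the high 32 bits) as a smi, so both can
+  // be stored with a single stp.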
+ __ Stp(x10,
+ x11,
+ MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
+ __ B(&next_capture);
+ __ Bind(&done);
+
+ // Return last match info.
+ __ Peek(x0, kLastMatchInfoOffset);
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&exception);
+ Register exception_value = x0;
+  // A stack overflow (on the backtrack stack) may have occurred
+ // in the RegExp code but no exception has been created yet.
+ // If there is no pending exception, handle that in the runtime system.
+ __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
+ __ Mov(x11,
+ Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ Ldr(exception_value, MemOperand(x11));
+ __ Cmp(x10, exception_value);
+ __ B(eq, &runtime);
+
+ __ Str(x10, MemOperand(x11)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ Label termination_exception;
+ __ JumpIfRoot(exception_value,
+ Heap::kTerminationExceptionRootIndex,
+ &termination_exception);
+
+ __ Throw(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&termination_exception);
+ __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+
+ __ Bind(&failure);
+ __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
+ __ PopCPURegList(used_callee_saved_registers);
+ // Drop the 4 arguments of the stub from the stack.
+ __ Drop(4);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ PopCPURegList(used_callee_saved_registers);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ Bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ B(ne, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ __ Bind(&external_string);
+ if (masm->emit_debug_code()) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Tst(x10, kIsIndirectStringMask);
+ __ Check(eq, kExternalStringExpectedButNotFound);
+ __ And(x10, x10, kStringRepresentationMask);
+ __ Cmp(x10, 0);
+ __ Check(ne, kExternalStringExpectedButNotFound);
+ }
+ __ Ldr(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&seq_string); // Go to (5).
+
+ // (8) If this is a short external string or not a string, bail out to
+ // runtime.
+ __ Bind(&not_long_external);
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ TestAndBranchIfAnySet(string_representation,
+ kShortExternalStringMask | kIsNotStringMask,
+ &runtime);
+
+ // (9) Sliced string. Replace subject with parent.
+ __ Ldr(sliced_string_offset,
+ UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ B(&check_underlying); // Go to (4).
+#endif
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm,
+ Register argc,
+ Register function,
+ Register feedback_vector,
+ Register index,
+ Register scratch1,
+ Register scratch2) {
+ ASM_LOCATION("GenerateRecordCallTarget");
+ ASSERT(!AreAliased(scratch1, scratch2,
+ argc, function, feedback_vector, index));
+ // Cache the called function in a feedback vector slot. Cache states are
+ // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
+ // argc : number of arguments to the construct function
+ // function : the function to call
+ // feedback_vector : the feedback vector
+ // index : slot in feedback vector (smi)
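+  //
+  // The slot therefore moves through the states:
+  //   uninitialized symbol -> JSFunction or AllocationSite
+  //                        -> megamorphic symbol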
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
+
+ // Load the cache state.
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Cmp(scratch1, function);
+ __ B(eq, &done);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+    // If we didn't have a matching function, and we didn't find the
+    // megamorphic sentinel, then we have in the slot either some other
+    // function or an AllocationSite. Do a map check on the object in the
+    // scratch1 register.
+ __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
+ __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &megamorphic);
+ __ B(&done);
+ }
+
+ __ Bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ Bind(&megamorphic);
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ Bind(&initialize);
+
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &not_array_function);
+
+    // The target function is the Array constructor.
+    // Create an AllocationSite if we don't already have it, and store it in
+    // the slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateAllocationSiteStub create_stub;
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(argc);
+ __ Push(argc, function, feedback_vector, index);
+
+      // CreateAllocationSiteStub expects the feedback vector in x2 and the
+      // slot index in x3.
+ ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+ __ CallStub(&create_stub);
+
+ __ Pop(index, feedback_vector, function, argc);
+ __ SmiUntag(argc);
+ }
+ __ B(&done);
+
+ __ Bind(&not_array_function);
+ }
+
+ // An uninitialized cache is patched with the function.
+
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(function, MemOperand(scratch1, 0));
+
+ __ Push(function);
+ __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(function);
+
+ __ Bind(&done);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallFunctionStub::Generate");
+ // x1 function the function to call
+ // x2 : feedback vector
+ // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ Register function = x1;
+ Register cache_cell = x2;
+ Register slot = x3;
+ Register type = x4;
+ Label slow, non_function, wrap, cont;
+
+ // TODO(jbramley): This function has a lot of unnamed registers. Name them,
+ // and tidy things up a bit.
+
+ if (NeedsChecks()) {
+ // Check that the function is really a JavaScript function.
+ __ JumpIfSmi(function, &non_function);
+
+ // Goto slow case if we do not have a function.
+ __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5);
+      // Type information was updated. Because we may call Array, which
+      // expects either undefined or an AllocationSite in x2, we need to
+      // set x2 to undefined.
+ __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex);
+ }
+ }
+
+ // Fast-case: Invoke the function now.
+ // x1 function pushed function
+ ParameterCount actual(argc_);
+
+ if (CallAsMethod()) {
+ if (NeedsChecks()) {
+ // Do not transform the receiver for strict mode functions.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
+
+      // Do not transform the receiver for native (compiler hints in w4).
+ __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
+ }
+
+ // Compute the receiver in sloppy mode.
+ __ Peek(x3, argc_ * kPointerSize);
+
+ if (NeedsChecks()) {
+ __ JumpIfSmi(x3, &wrap);
+ __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
+ } else {
+ __ B(&wrap);
+ }
+
+ __ Bind(&cont);
+ }
+ __ InvokeFunction(function,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper());
+
+ if (NeedsChecks()) {
+ // Slow-case: Non-function called.
+ __ Bind(&slow);
+ if (RecordCallTarget()) {
+ // If there is a call target cache, mark it megamorphic in the
+ // non-function case. MegamorphicSentinel is an immortal immovable object
+ // (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
+ kPointerSizeLog2));
+ __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex);
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ }
+ // Check for function proxy.
+ // x10 : function type.
+ __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
+ __ Push(function); // put proxy as additional argument
+ __ Mov(x0, argc_ + 1);
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ Bind(&non_function);
+ __ Poke(function, argc_ * kXRegSize);
+ __ Mov(x0, argc_); // Set up the number of arguments.
+ __ Mov(x2, 0);
+ __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ if (CallAsMethod()) {
+ __ Bind(&wrap);
+ // Wrap the receiver and patch it back onto the stack.
+ { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Pop(x1);
+ }
+ __ Poke(x0, argc_ * kPointerSize);
+ __ B(&cont);
+ }
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("CallConstructStub::Generate");
+ // x0 : number of arguments
+ // x1 : the function to call
+ // x2 : feedback vector
+  // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+ Register function = x1;
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(function, &non_function_call);
+ // Check that the function is a JSFunction.
+ Register object_type = x10;
+ __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
+ &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
+
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into x2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by x3 + 1.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(x2, x5);
+ }
+
+ // Jump to the function-specific construct stub.
+ Register jump_reg = x4;
+ Register shared_func_info = jump_reg;
+ Register cons_stub = jump_reg;
+ Register cons_stub_code = jump_reg;
+ __ Ldr(shared_func_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(cons_stub,
+ FieldMemOperand(shared_func_info,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(cons_stub_code);
+
+ Label do_call;
+ __ Bind(&slow);
+ __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ B(&do_call);
+
+ __ Bind(&non_function_call);
+ __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+
+ __ Bind(&do_call);
+ // Set expected number of arguments to zero (not changing x0).
+ __ Mov(x2, 0);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+
+ // If the receiver is not a string trigger the non-string case.
+ __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ __ Bind(&got_smi_index_);
+ // Check for index out of range.
+ __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
+ __ Cmp(result_, Operand::UntagSmi(index_));
+ __ B(ls, index_out_of_range_);
+
+ __ SmiUntag(index_);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_.W(),
+ result_,
+ &call_runtime_);
+ __ SmiTag(result_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+ __ Bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ result_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Mov(index_, x0);
+ __ Pop(object_);
+ // Reload the instance type.
+ __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(index_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ B(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ Bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ SmiTag(index_);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
+ __ B(hi, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
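+  // Shifting the smi-tagged code right by (kSmiShift - kPointerSizeLog2)
+  // yields the untagged char code multiplied by kPointerSize, i.e. the byte
+  // offset of the matching cache entry.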
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+ __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
+ __ Bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper) {
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+ __ Bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ Push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Mov(result_, x0);
+ call_helper.AfterCall(masm);
+ __ B(&exit_);
+
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  // Inputs are in x1 (lhs) and x0 (rhs).
+ ASSERT(state_ == CompareIC::SMI);
+ ASM_LOCATION("ICCompareStub[Smis]");
+ Label miss;
+ // Bail out (to 'miss') unless both x0 and x1 are smis.
+ __ JumpIfEitherNotSmi(x0, x1, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Sub(x0, x0, x1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(x1);
+ __ Sub(x0, x1, Operand::UntagSmi(x0));
+ }
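+  // In the equality case the (still tagged) difference is zero iff the
+  // operands are equal; in the ordered case the untagged difference carries
+  // the sign of lhs - rhs.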
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
+ ASM_LOCATION("ICCompareStub[HeapNumbers]");
+
+ Label unordered, maybe_undefined1, maybe_undefined2;
+ Label miss, handle_lhs, values_in_d_regs;
+ Label untag_rhs, untag_lhs;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+ FPRegister rhs_d = d0;
+ FPRegister lhs_d = d1;
+
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(lhs, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rhs, &miss);
+ }
+
+ __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
+ __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
+
+ // Load rhs if it's a heap number.
+ __ JumpIfSmi(rhs, &handle_lhs);
+ __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+ // Load lhs if it's a heap number.
+ __ Bind(&handle_lhs);
+ __ JumpIfSmi(lhs, &values_in_d_regs);
+ __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ __ Bind(&values_in_d_regs);
+ __ Fcmp(lhs_d, rhs_d);
+ __ B(vs, &unordered); // Overflow flag set if either is NaN.
+ STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
+ __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
+ __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
+ __ Ret();
+
+ __ Bind(&unordered);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+
+ __ Bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
+ __ JumpIfSmi(lhs, &unordered);
+ __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+ __ B(&unordered);
+ }
+
+ __ Bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+ ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+ // Check that both operands are internalized strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
+
+ // Internalized strings are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASM_LOCATION("ICCompareStub[UniqueNames]");
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ Register lhs_instance_type = w2;
+ Register rhs_instance_type = w3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(lhs, rhs, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in lhs_instance_type and rhs_instance_type.
+ __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
+
+ // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
+ // should have kInternalizedTag set.
+ __ JumpIfNotUniqueName(lhs_instance_type, &miss);
+ __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+
+ // Unique names are compared by identity.
+ STATIC_ASSERT(EQUAL == 0);
+ __ Cmp(lhs, rhs);
+ __ Cset(result, ne);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRING);
+ ASM_LOCATION("ICCompareStub[Strings]");
+
+ Label miss;
+
+ bool equality = Token::IsEqualityOp(op_);
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ // Check that both operands are strings.
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ Register rhs_type = x10;
+ Register lhs_type = x11;
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
+ __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Orr(x12, lhs_type, rhs_type);
+ __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
+
+ // Fast check for identical strings.
+ Label not_equal;
+ __ Cmp(lhs, rhs);
+ __ B(ne, &not_equal);
+ __ Mov(result, EQUAL);
+ __ Ret();
+
+ __ Bind(&not_equal);
+  // Handle non-identical strings.
+
+ // Check that both strings are internalized strings. If they are, we're done
+ // because we already know they are not identical. We know they are both
+ // strings.
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ Label not_internalized_strings;
+ __ Orr(x12, lhs_type, rhs_type);
+ __ TestAndBranchIfAnySet(
+ x12, kIsNotInternalizedMask, &not_internalized_strings);
+ // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
+ __ Ret();
+ __ Bind(&not_internalized_strings);
+ }
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ lhs_type, rhs_type, x12, x13, &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, lhs, rhs, x10, x11, x12);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, lhs, rhs, x10, x11, x12, x13);
+ }
+
+ // Handle more complex cases in runtime.
+ __ Bind(&runtime);
+ __ Push(lhs, rhs);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECT);
+ ASM_LOCATION("ICCompareStub[Objects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+
+ ASSERT(GetCondition() == eq);
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[KnownObjects]");
+
+ Label miss;
+
+ Register result = x0;
+ Register rhs = x0;
+ Register lhs = x1;
+
+ __ JumpIfEitherSmi(rhs, lhs, &miss);
+
+ Register rhs_map = x10;
+ Register lhs_map = x11;
+ __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Cmp(rhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+ __ Cmp(lhs_map, Operand(known_map_));
+ __ B(ne, &miss);
+
+ __ Sub(result, rhs, lhs);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+// This method handles the case where a compare stub had the wrong
+// implementation. It calls a miss handler, which re-writes the stub. All other
+// ICCompareStub::Generate* methods should fall back into this one if their
+// operands were not the expected types.
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("ICCompareStub[Miss]");
+
+ Register stub_entry = x11;
+ {
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ Register op = x10;
+ Register left = x1;
+ Register right = x0;
+ // Preserve some caller-saved registers.
+ __ Push(x1, x0, lr);
+ // Push the arguments.
+ __ Mov(op, Smi::FromInt(op_));
+ __ Push(left, right, op);
+
+ // Call the miss handler. This also pops the arguments.
+ __ CallExternalReference(miss, 3);
+
+ // Compute the entry point of the rewritten stub.
+ __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore caller-saved registers.
+ __ Pop(lr, x0, x1);
+ }
+
+ // Tail-call to the new stub.
+ __ Jump(stub_entry);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+  // hash = seed + character;
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ Add(hash, character, Operand(hash, LSR, kSmiShift));
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ ASSERT(!AreAliased(hash, character));
+
+ // hash += character;
+ __ Add(hash, hash, character);
+
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+
+ // hash += hash << 10;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
+ // hash ^= hash >> 6;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // Compute hashes modulo 2^32 using a 32-bit W register.
+ Register hash_w = hash.W();
+ Register scratch_w = scratch.W();
+ ASSERT(!AreAliased(hash_w, scratch_w));
+
+ // hash += hash << 3;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
+ // hash ^= hash >> 11;
+ __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
+ // hash += hash << 15;
+ __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
+
+ __ Ands(hash_w, hash_w, String::kHashBitMask);
+
+ // if (hash == 0) hash = 27;
+ __ Mov(scratch_w, StringHasher::kZeroHash);
+ __ Csel(hash_w, scratch_w, hash_w, eq);
+}
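+
+// Taken together, the three hash helpers above implement the incremental
+// string hash. A minimal C-style sketch of the same computation (assuming
+// 32-bit wrap-around arithmetic, as provided by the W registers used above):
+//
+//   uint32_t hash = seed + c0;                  // GenerateHashInit
+//   hash += hash << 10;  hash ^= hash >> 6;
+//   for (each further character c) {            // GenerateHashAddCharacter
+//     hash += c;  hash += hash << 10;  hash ^= hash >> 6;
+//   }
+//   hash += hash << 3;                          // GenerateHashGetHash
+//   hash ^= hash >> 11;
+//   hash += hash << 15;
+//   hash &= String::kHashBitMask;
+//   if (hash == 0) hash = StringHasher::kZeroHash;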
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("SubStringStub::Generate");
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // jssp[0]: substring "to" offset
+ // jssp[8]: substring "from" offset
+ // jssp[16]: pointer to string object
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+  //   0 <= from <= to <= string.length (in debug mode).
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = x0;
+ Register from = x15;
+ Register input_string = x10;
+ Register input_length = x11;
+ Register input_type = x12;
+ Register result_string = x0;
+ Register result_length = x1;
+ Register temp = x3;
+
+ __ Peek(to, kToOffset);
+ __ Peek(from, kFromOffset);
+
+ // Check that both from and to are smis. If not, jump to runtime.
+ __ JumpIfEitherNotSmi(from, to, &runtime);
+ __ SmiUntag(from);
+ __ SmiUntag(to);
+
+ // Calculate difference between from and to. If to < from, branch to runtime.
+ __ Subs(result_length, to, from);
+ __ B(mi, &runtime);
+
+ // Check from is positive.
+ __ Tbnz(from, kWSignBit, &runtime);
+
+ // Make sure first argument is a string.
+ __ Peek(input_string, kStringOffset);
+ __ JumpIfSmi(input_string, &runtime);
+ __ IsObjectJSStringType(input_string, input_type, &runtime);
+
+ Label single_char;
+ __ Cmp(result_length, 1);
+ __ B(eq, &single_char);
+
+ // Short-cut for the case of trivial substring.
+ Label return_x0;
+ __ Ldrsw(input_length,
+ UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
+
+ __ Cmp(result_length, input_length);
+ __ CmovX(x0, input_string, eq);
+ // Return original string.
+ __ B(eq, &return_x0);
+
+ // Longer than original string's length or negative: unsafe arguments.
+ __ B(hi, &runtime);
+
+ // Shorter than original string's length: an actual substring.
+
+ // x0 to substring end character offset
+ // x1 result_length length of substring result
+ // x10 input_string pointer to input string object
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+
+ // Deal with different string types: update the index if necessary and put
+ // the underlying string into register unpacked_string.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ Label update_instance_type;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+
+ // Test for string types, and branch/fall through to appropriate unpacking
+ // code.
+ __ Tst(input_type, kIsIndirectStringMask);
+ __ B(eq, &seq_or_external_string);
+ __ Tst(input_type, kSlicedNotConsMask);
+ __ B(ne, &sliced_string);
+
+ Register unpacked_string = input_string;
+
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, ConsString::kFirstOffset));
+ __ B(&update_instance_type);
+
+ __ Bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ Ldrsw(temp,
+ UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
+ __ Add(from, from, temp);
+ __ Ldr(unpacked_string,
+ FieldMemOperand(input_string, SlicedString::kParentOffset));
+
+ __ Bind(&update_instance_type);
+ __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
+ __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ // Now control must go to &underlying_unpacked. Since no code is generated
+ // between here and that label, we fall through instead of generating a
+ // useless branch.
+
+ __ Bind(&seq_or_external_string);
+ // Sequential or external string. Registers unpacked_string and input_string
+ // alias, so there's nothing to do here.
+ // Note that if code is added here, the above code must be updated.
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x15 from substring start character offset
+ __ Bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ __ Cmp(result_length, SlicedString::kMinLength);
+ // Short slice. Copy instead of slicing.
+ __ B(lt, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
+ __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+ __ B(&set_slice_header);
+
+ __ Bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
+ &runtime);
+
+ __ Bind(&set_slice_header);
+ __ SmiTag(from);
+ __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
+ __ Str(unpacked_string,
+ FieldMemOperand(result_string, SlicedString::kParentOffset));
+ __ B(&return_x0);
+
+ __ Bind(&copy_routine);
+ }
+
+ // x0 result_string pointer to result string object (uninit)
+ // x1 result_length length of substring result
+ // x10 unpacked_string pointer to unpacked string object
+ // x11 input_length length of input string
+ // x12 input_type instance type of input string
+ // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
+ // x13 substring_char0 pointer to first char of substring (uninit)
+ // x14 result_char0 pointer to first char of result (uninit)
+ // x15 from substring start character offset
+ Register unpacked_char0 = x13;
+ Register substring_char0 = x13;
+ Register result_char0 = x14;
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ Tst(input_type, kExternalStringTag);
+ __ B(eq, &sequential_string);
+
+ __ Tst(input_type, kShortExternalStringTag);
+ __ B(ne, &runtime);
+ __ Ldr(unpacked_char0,
+ FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
+ // unpacked_char0 points to the first character of the underlying string.
+ __ B(&allocate_result);
+
+ __ Bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(unpacked_char0, unpacked_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ __ Bind(&allocate_result);
+ // Allocate the result. The encoding bit checked below selects between a
+ // one-byte (ASCII) and a two-byte result string.
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, from);
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+ __ B(&return_x0);
+
+ // Allocate and copy the resulting two-byte string.
+ __ Bind(&two_byte_sequential);
+ __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
+
+ // Locate first character of result.
+ __ Add(result_char0, result_string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
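+ // CopyBytes needs a byte count, so double the character count for the
+ // two-byte copy.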
+ __ Add(result_length, result_length, result_length);
+ __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
+
+ __ Bind(&return_x0);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
+ __ Drop(3);
+ __ Ret();
+
+ __ Bind(&runtime);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+
+ __ bind(&single_char);
+ // x1: result_length
+ // x10: input_string
+ // x12: input_type
+ // x15: from (untagged)
+ __ SmiTag(from);
+ StringCharAtGenerator generator(
+ input_string, from, result_length, x0,
+ &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+ Register result = x0;
+ Register left_length = scratch1;
+ Register right_length = scratch2;
+
+ // Compare lengths. If lengths differ, strings can't be equal. Lengths are
+ // smis, and don't need to be untagged.
+ Label strings_not_equal, check_zero_length;
+ __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
+ __ Cmp(left_length, right_length);
+ __ B(eq, &check_zero_length);
+
+ __ Bind(&strings_not_equal);
+ __ Mov(result, Smi::FromInt(NOT_EQUAL));
+ __ Ret();
+
+ // Check if the length is zero. If so, the strings must be equal (and empty.)
+ Label compare_chars;
+ __ Bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbnz(left_length, &compare_chars);
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+
+ // Compare characters. Falls through if all characters are equal.
+ __ Bind(&compare_chars);
+ GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
+ scratch3, &strings_not_equal);
+
+ // Characters in strings are equal.
+ __ Mov(result, Smi::FromInt(EQUAL));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+ Label result_not_equal, compare_lengths;
+
+ // Find minimum length and length difference.
+ Register length_delta = scratch3;
+ __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subs(length_delta, scratch1, scratch2);
+
+ Register min_length = scratch1;
+ __ Csel(min_length, scratch2, scratch1, gt);
+ __ Cbz(min_length, &compare_lengths);
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ Bind(&compare_lengths);
+
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+
+ // Use length_delta as result if it's zero.
+ Register result = x0;
+ __ Subs(result, length_delta, 0);
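+ // If length_delta was zero the flags are 'eq', neither conditional move
+ // below fires, and the result stays Smi EQUAL (0). Otherwise the 'gt'/'lt'
+ // flags select GREATER or LESS.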
+
+ __ Bind(&result_not_equal);
+ Register greater = x10;
+ Register less = x11;
+ __ Mov(greater, Smi::FromInt(GREATER));
+ __ Mov(less, Smi::FromInt(LESS));
+ __ CmovX(result, greater, gt);
+ __ CmovX(result, less, lt);
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal) {
+ ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Add(left, left, scratch1);
+ __ Add(right, right, scratch1);
+
+ Register index = length;
+ __ Neg(index, length); // index = -length;
+
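+ // In C terms, the loop below is roughly:
+ //   for (int i = -length; i != 0; i++) {
+ //     if (left_chars[i] != right_chars[i]) goto chars_not_equal;
+ //   }
+ // where left_chars and right_chars point one past each string's last
+ // character, thanks to the adjustment above.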
+ // Compare loop
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(scratch1, MemOperand(left, index));
+ __ Ldrb(scratch2, MemOperand(right, index));
+ __ Cmp(scratch1, scratch2);
+ __ B(ne, chars_not_equal);
+ __ Add(index, index, 1);
+ __ Cbnz(index, &loop);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[8]: left string
+ Register right = x10;
+ Register left = x11;
+ Register result = x0;
+ __ Pop(right, left);
+
+ Label not_same;
+ __ Subs(result, right, left);
+ __ B(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Ret();
+
+ __ Bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first,
+ // as this function will generate a return.
+ __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+
+ __ Bind(&runtime);
+
+ // Push arguments back on to the stack.
+ // sp[0] = right string
+ // sp[8] = left string.
+ __ Push(left, right);
+
+ // Call the runtime.
+ // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+}
+
+
+void ArrayPushStub::Generate(MacroAssembler* masm) {
+ Register receiver = x0;
+
+ int argc = arguments_count();
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ return;
+ }
+
+ Isolate* isolate = masm->isolate();
+
+ if (argc != 1) {
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ Label call_builtin, attempt_to_grow_elements, with_write_barrier;
+
+ Register elements_length = x8;
+ Register length = x7;
+ Register elements = x6;
+ Register end_elements = x5;
+ Register value = x4;
+ // Get the elements array of the object.
+ __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ x10,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+ }
+
+ // Get the array's length and calculate new length.
+ __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Add(length, length, Smi::FromInt(argc));
+
+ // Check if we could survive without allocation.
+ __ Ldr(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(length, elements_length);
+
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+
+ if (IsFastSmiOrObjectElementsKind(elements_kind())) {
+ __ B(gt, &attempt_to_grow_elements);
+
+ // Check if value is a smi.
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ JumpIfNotSmi(value, &with_write_barrier);
+
+ // Store the value.
+ // We may need a register containing the address end_elements below, so the
+ // pre-indexed store writes the computed address back into end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+ } else {
+ __ B(gt, &call_builtin);
+
+ __ Peek(value, (argc - 1) * kPointerSize);
+ __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
+ &call_builtin, argc * kDoubleSize);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Return length.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ if (IsFastDoubleElementsKind(elements_kind())) {
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+ return;
+ }
+
+ __ Bind(&with_write_barrier);
+
+ if (IsFastSmiElementsKind(elements_kind())) {
+ if (FLAG_trace_elements_transitions) {
+ __ B(&call_builtin);
+ }
+
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfHeapNumber(x10, &call_builtin);
+
+ ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
+ __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
+ const int header_size = FixedArrayBase::kHeaderSize;
+ // Verify that the object can be transitioned in place.
+ const int origin_offset = header_size + elements_kind() * kPointerSize;
+ __ ldr(x11, FieldMemOperand(receiver, origin_offset));
+ __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
+ __ cmp(x11, x12);
+ __ B(ne, &call_builtin);
+
+ const int target_offset = header_size + target_kind * kPointerSize;
+ __ Ldr(x10, FieldMemOperand(x10, target_offset));
+ __ Mov(x11, receiver);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, DONT_TRACK_ALLOCATION_SITE, NULL);
+ }
+
+ // Save new length.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Store the value.
+ // We may need a register containing the address end_elements below, so the
+ // pre-indexed store writes the computed address back into end_elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&attempt_to_grow_elements);
+
+ if (!FLAG_inline_new) {
+ __ B(&call_builtin);
+ }
+
+ Register argument = x2;
+ __ Peek(argument, (argc - 1) * kPointerSize);
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ if (IsFastSmiElementsKind(elements_kind())) {
+ __ JumpIfNotSmi(argument, &call_builtin);
+ }
+
+ // We could be lucky and the elements array could be at the top of new-space.
+ // In this case we can just grow it in place by moving the allocation pointer
+ // up.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate);
+
+ const int kAllocationDelta = 4;
+ ASSERT(kAllocationDelta >= argc);
+ Register allocation_top_addr = x5;
+ Register allocation_top = x9;
+ // Load top and check if it is the end of elements.
+ __ Add(end_elements, elements,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Add(end_elements, end_elements, kEndElementsOffset);
+ __ Mov(allocation_top_addr, new_space_allocation_top);
+ __ Ldr(allocation_top, MemOperand(allocation_top_addr));
+ __ Cmp(end_elements, allocation_top);
+ __ B(ne, &call_builtin);
+
+ __ Mov(x10, new_space_allocation_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
+ __ Cmp(allocation_top, x10);
+ __ B(hi, &call_builtin);
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ Str(allocation_top, MemOperand(allocation_top_addr));
+ // Push the argument.
+ __ Str(argument, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ ASSERT(kAllocationDelta == 4);
+ __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
+ __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
+
+ // Update elements' and array's sizes.
+ __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta));
+ __ Str(elements_length,
+ FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Mov(x0, length);
+ __ Ret();
+
+ __ Bind(&call_builtin);
+ __ TailCallExternalReference(
+ ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ // Load x2 with the allocation site. We stick an undefined dummy value here
+ // and replace it with the real allocation site later when we instantiate this
+ // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+ __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
+
+ // Make sure that we actually patched the allocation site.
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(x2, kExpectedAllocationSite);
+ __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ kExpectedAllocationSite);
+ }
+
+ // Tail call into the stub that handles binary operations with allocation
+ // sites.
+ BinaryOpWithAllocationSiteStub stub(state_);
+ __ TailCallStub(&stub);
+}
+
+
+bool CodeStub::CanUseFPRegisters() {
+ // FP registers are always available on ARM64.
+ return true;
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ // We need some extra registers for this stub, they have been allocated
+ // but we need to save them before using them.
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+ __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+
+ __ CheckPageFlagSet(regs_.object(),
+ value,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ Bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ Register address =
+ x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.Is(regs_.object()));
+ ASSERT(!address.Is(x0));
+ __ Mov(address, regs_.address());
+ __ Mov(x0, regs_.object());
+ __ Mov(x1, address);
+ __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ ExternalReference function =
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate());
+ __ CallCFunction(function, 3, 0);
+
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ Register mem_chunk = regs_.scratch0();
+ Register counter = regs_.scratch1();
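+ // Clearing the page-alignment bits of the object's address yields its
+ // MemoryChunk header. The write barrier counter stored there is decremented
+ // below; once it goes negative we must inform the incremental marker.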
+ __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
+ __ Ldr(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subs(counter, counter, 1);
+ __ Str(counter,
+ MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
+ __ B(mi, &need_incremental);
+
+ // If the object is not black we don't have to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&on_black);
+ // Get the value from the slot.
+ Register value = regs_.scratch0();
+ __ Ldr(value, MemOperand(regs_.address()));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlagClear(value,
+ regs_.scratch1(),
+ MemoryChunk::kEvacuationCandidateMask,
+ &ensure_not_white);
+
+ __ CheckPageFlagClear(regs_.object(),
+ regs_.scratch1(),
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ &need_incremental);
+
+ __ Bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.address(), regs_.object());
+ __ EnsureNotWhite(value,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm); // Restore the extra scratch registers we used.
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ Bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ Bind(&need_incremental);
+ // Fall through when we need to inform the incremental marker.
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // We patch the first two instructions back and forth between a nop and a
+ // real branch when we start and stop incremental heap marking.
+ // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
+ // are generated.
+ // See RecordWriteStub::Patch for details.
+ {
+ InstructionAccurateScope scope(masm, 2);
+ __ adr(xzr, &skip_to_incremental_noncompacting);
+ __ adr(xzr, &skip_to_incremental_compacting);
+ }
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_, // scratch1
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ Bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ Bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // x0 value element value to store
+ // x3 index_smi element index as smi
+ // sp[0] array_index_smi array literal index in function as smi
+ // sp[1] array array literal
+
+ Register value = x0;
+ Register index_smi = x3;
+
+ Register array = x1;
+ Register array_map = x2;
+ Register array_index_smi = x4;
+ __ PeekPair(array_index_smi, array, 0);
+ __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
+
+ Label double_elements, smi_element, fast_elements, slow_elements;
+ Register bitfield2 = x10;
+ __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
+
+ // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
+ // FAST_HOLEY_ELEMENTS.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
+ __ B(hi, &double_elements);
+
+ __ JumpIfSmi(value, &smi_element);
+
+ // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
+ __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift),
+ &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call into
+ // the runtime.
+ __ Bind(&slow_elements);
+ __ Push(array, index_smi, value);
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ __ Push(x11, array_index_smi);
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+ __ Bind(&fast_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Str(value, MemOperand(x11));
+ // Update the write barrier for the array store.
+ __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+ // and value is Smi.
+ __ Bind(&smi_element);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
+ __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
+ __ Ret();
+
+ __ Bind(&double_elements);
+ __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
+ __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
+ &slow_elements);
+ __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ Ldr(x1, MemOperand(fp, parameter_count_offset));
+ if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+ __ Add(x1, x1, 1);
+ }
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ Drop(x1);
+ // Return to IC Miss stub, continuation still on stack.
+ __ Ret();
+}
+
+
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+ Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (masm->isolate()->function_entry_hook() != NULL) {
+ ProfileEntryHookStub stub;
+ Assembler::BlockConstPoolScope no_const_pools(masm);
+ Label entry_hook_call_start;
+ __ Bind(&entry_hook_call_start);
+ __ Push(lr);
+ __ CallStub(&stub);
+ ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+ kProfileEntryHookCallSize);
+
+ __ Pop(lr);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+
+ // Save all kCallerSaved registers (including lr), since this can be called
+ // from anywhere.
+ // TODO(jbramley): What about FP registers?
+ __ PushCPURegList(kCallerSaved);
+ ASSERT(kCallerSaved.IncludesAliasOf(lr));
+ const int kNumSavedRegs = kCallerSaved.Count();
+
+ // Compute the function's address as the first argument.
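+ // lr holds the return address of the CallStub in MaybeCallEntryHook;
+ // subtracting the size of that push-and-call sequence gives the address at
+ // which the sequence starts, which is passed to the hook as the function's
+ // entry address.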
+ __ Sub(x0, lr, kProfileEntryHookCallSize);
+
+#if V8_HOST_ARCH_ARM64
+ uintptr_t entry_hook =
+ reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
+ __ Mov(x10, entry_hook);
+#else
+ // Under the simulator we need to indirect the entry hook through a trampoline
+ // function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ __ Mov(x10, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ // It additionally takes the isolate as a third parameter.
+ __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
+#endif
+
+ // The caller's return address is above the saved temporaries.
+ // Grab its location for the second argument to the hook.
+ __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
+
+ {
+ // Create a dummy frame, as CallCFunction requires this.
+ FrameScope frame(masm, StackFrame::MANUAL);
+ __ CallCFunction(x10, 2, 0);
+ }
+
+ __ PopCPURegList(kCallerSaved);
+ __ Ret();
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // When calling into C++ code the stack pointer must be csp.
+ // Therefore this code must use csp for peek/poke operations when the
+ // stub is generated. When the stub is called
+ // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
+ // and configure the stack pointer *before* doing the call.
+ const Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+
+ // Put return address on the stack (accessible to GC through exit frame pc).
+ __ Poke(lr, 0);
+ // Call the C++ function.
+ __ Blr(x10);
+ // Return to calling code.
+ __ Peek(lr, 0);
+ __ Ret();
+
+ __ SetStackPointer(old_stack_pointer);
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ // Make sure the caller configured the stack pointer (see comment in
+ // DirectCEntryStub::Generate).
+ ASSERT(csp.Is(__ StackPointer()));
+
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
+ __ Mov(x10, target);
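+ // Blr(lr) both branches to the stub (whose code address was just loaded
+ // into lr) and replaces lr with the return address in the caller; the stub
+ // spills that lr to the stack so the GC can see it through the exit frame.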
+ // Branch to the stub.
+ __ Blr(lr);
+}
+
+
+// Probe the name dictionary in the 'elements' register.
+// Jump to the 'done' label if a property with the given name is found.
+// Jump to the 'miss' label otherwise.
+//
+// If the lookup is successful, 'scratch2' points at the matching entry, i.e.
+// elements + (index * NameDictionary::kEntrySize * kPointerSize).
+// 'elements' and 'name' registers are preserved on miss.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+
+ // Assert that name contains a string.
+ __ AssertName(name);
+
+ // Compute the capacity mask.
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
+ __ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following And instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(scratch2, scratch2, Operand(
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+ }
+ __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ UseScratchRegisterScope temps(masm);
+ Register scratch3 = temps.AcquireX();
+ __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Cmp(name, scratch3);
+ __ B(eq, done);
+ }
+
+ // The inlined probes didn't find the entry.
+ // Call the complete stub to scan the whole dictionary.
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch1);
+ spill_list.Remove(scratch2);
+
+ __ PushCPURegList(spill_list);
+
+ if (name.is(x0)) {
+ ASSERT(!elements.is(x1));
+ __ Mov(x1, name);
+ __ Mov(x0, elements);
+ } else {
+ __ Mov(x0, elements);
+ __ Mov(x1, name);
+ }
+
+ Label not_found;
+ NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ Cbz(x0, &not_found);
+ __ Mov(scratch2, x2); // Move entry index into scratch2.
+ __ PopCPURegList(spill_list);
+ __ B(done);
+
+ __ Bind(&not_found);
+ __ PopCPURegList(spill_list);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0) {
+ ASSERT(!AreAliased(receiver, properties, scratch0));
+ ASSERT(name->IsUniqueName());
+ // If the names in the slots probed for the hash value (probes 1 to
+ // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
+ // (its name is the undefined value), then the hash table is guaranteed not
+ // to contain the property. This holds even if some slots represent deleted
+ // properties (their names are the hole value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
+ __ Sub(index, index, 1);
+ __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ Register tmp = index;
+ __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
+
+ // Stop if found the property.
+ __ Cmp(entity_name, Operand(name));
+ __ B(eq, miss);
+
+ Label good;
+ __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
+
+ // Check if the entry name is not a unique name.
+ __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Ldrb(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entity_name, miss);
+ __ Bind(&good);
+ }
+
+ CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
+ spill_list.Combine(lr);
+ spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
+
+ __ PushCPURegList(spill_list);
+
+ __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Mov(x1, Operand(name));
+ NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ __ CallStub(&stub);
+ // Move stub return value to scratch0. Note that scratch0 is not included in
+ // spill_list and won't be clobbered by PopCPURegList.
+ __ Mov(scratch0, x0);
+ __ PopCPURegList(spill_list);
+
+ __ Cbz(scratch0, done);
+ __ B(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
+ //
+ // Arguments are in x0 and x1:
+ // x0: property dictionary.
+ // x1: the name of the property we are looking for.
+ //
+ // The return value is in x0: zero if the lookup failed, non-zero otherwise.
+ // If the lookup is successful, x2 will contain the index of the entry.
+
+ Register result = x0;
+ Register dictionary = x0;
+ Register key = x1;
+ Register index = x2;
+ Register mask = x3;
+ Register hash = x4;
+ Register undefined = x5;
+ Register entry_key = x6;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
+ __ Sub(mask, mask, 1);
+
+ __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+ // Add the probe offset (i + i * i) left shifted to avoid right shifting
+ // the hash in a separate instruction. The value hash + i + i * i is right
+ // shifted in the following And instruction.
+ ASSERT(NameDictionary::GetProbeOffset(i) <
+ 1 << (32 - Name::kHashFieldOffset));
+ __ Add(index, hash,
+ NameDictionary::GetProbeOffset(i) << Name::kHashShift);
+ } else {
+ __ Mov(index, hash);
+ }
+ __ And(index, mask, Operand(index, LSR, Name::kHashShift));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NameDictionary::kEntrySize == 3);
+ __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
+
+ __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Cmp(entry_key, undefined);
+ __ B(eq, &not_in_dictionary);
+
+ // Stop if found the property.
+ __ Cmp(entry_key, key);
+ __ B(eq, &in_dictionary);
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a unique name.
+ __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+ }
+ }
+
+ __ Bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup, probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ Mov(result, 0);
+ __ Ret();
+ }
+
+ __ Bind(&in_dictionary);
+ __ Mov(result, 1);
+ __ Ret();
+
+ __ Bind(&not_in_dictionary);
+ __ Mov(result, 0);
+ __ Ret();
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatch");
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(), mode);
+ __ TailCallStub(&stub);
+
+ } else if (mode == DONT_OVERRIDE) {
+ Register kind = x3;
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ // TODO(jbramley): Is this the best way to handle this? Can we make the
+ // tail calls conditional, rather than hopping over each one?
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ T stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+// TODO(jbramley): If this needs to be a special case, make it a proper template
+// specialization, and not a separate function.
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ ASM_LOCATION("CreateArrayDispatchOneArgument");
+ // x0 - argc
+ // x1 - constructor?
+ // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+ // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+ // sp[0] - last argument
+
+ Register allocation_site = x2;
+ Register kind = x3;
+
+ Label normal_sequence;
+ if (mode == DONT_OVERRIDE) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, the array is holey.
+ __ Tbnz(kind, 0, &normal_sequence);
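+ // The STATIC_ASSERTs above guarantee that packed kinds are even and their
+ // holey counterparts are the next odd value, so testing bit 0 is enough.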
+ }
+
+ // Look at the last argument.
+ // TODO(jbramley): What does a 0 argument represent?
+ __ Peek(x10, 0);
+ __ Cbz(x10, &normal_sequence);
+
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
+
+ __ Bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub);
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the slot).
+ __ Orr(kind, kind, 1);
+
+ if (FLAG_debug_code) {
+ __ Ldr(x10, FieldMemOperand(allocation_site, 0));
+ __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
+ &normal_sequence);
+ __ Assert(eq, kExpectedAllocationSite);
+ }
+
+ // Save the resulting elements kind in type info. We can't just store 'kind'
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ Ldr(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
+ __ Str(x11, FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+
+ __ Bind(&normal_sequence);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
+ __ CompareAndBranch(kind, candidate_kind, ne, &next);
+ ArraySingleArgumentConstructorStub stub(candidate_kind);
+ __ TailCallStub(&stub);
+ __ Bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ int to_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= to_index; ++i) {
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ T stub(kind);
+ stub.GetCode(isolate);
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ T stub1(kind, DISABLE_ALLOCATION_SITES);
+ stub1.GetCode(isolate);
+ }
+ }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+ isolate);
+ ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+ isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+ Isolate* isolate) {
+ ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ for (int i = 0; i < 2; i++) {
+ // For internal arrays we only need a few things.
+ InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
+ stubh1.GetCode(isolate);
+ InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
+ stubh2.GetCode(isolate);
+ InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
+ stubh3.GetCode(isolate);
+ }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ Register argc = x0;
+ if (argument_count_ == ANY) {
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+ ASM_LOCATION("ArrayConstructorStub::Generate");
+ // ----------- S t a t e -------------
+ // -- x0 : argc (only if argument_count_ == ANY)
+ // -- x1 : constructor
+ // -- x2 : AllocationSite or undefined
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Register constructor = x1;
+ Register allocation_site = x2;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi check below catches both a NULL pointer and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+
+ // We should either have undefined in the allocation_site register or a
+ // valid AllocationSite.
+ __ AssertUndefinedOrAllocationSite(allocation_site, x10);
+ }
+
+ Register kind = x3;
+ Label no_info;
+ // Get the elements kind and case on that.
+ __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
+
+ __ Ldrsw(kind,
+ UntagSmiFieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOffset));
+ __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+ __ Bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+ MacroAssembler* masm, ElementsKind kind) {
+ Label zero_case, n_case;
+ Register argc = x0;
+
+ __ Cbz(argc, &zero_case);
+ __ CompareAndBranch(argc, 1, ne, &n_case);
+
+ // One argument.
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ InternalArraySingleArgumentConstructorStub
+ stub1_holey(GetHoleyElementsKind(kind));
+ __ TailCallStub(&stub1_holey);
+
+ __ Bind(&packed_case);
+ }
+ InternalArraySingleArgumentConstructorStub stub1(kind);
+ __ TailCallStub(&stub1);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ InternalArrayNoArgumentConstructorStub stub0(kind);
+ __ TailCallStub(&stub0);
+
+ __ Bind(&n_case);
+ // N arguments.
+ InternalArrayNArgumentsConstructorStub stubN(kind);
+ __ TailCallStub(&stubN);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- x1 : constructor
+ // -- sp[0] : return address
+ // -- sp[4] : last argument
+ // -----------------------------------
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+
+ Register constructor = x1;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the global and natives
+ // builtin Array functions which always have maps.
+
+ Label unexpected_map, map_ok;
+ // Initial map for the builtin Array function should be a map.
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ // The Smi check below catches both a NULL pointer and a Smi.
+ __ JumpIfSmi(x10, &unexpected_map);
+ __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
+ __ Bind(&unexpected_map);
+ __ Abort(kUnexpectedInitialMapForArrayFunction);
+ __ Bind(&map_ok);
+ }
+
+ Register kind = w3;
+ // Figure out the right elements kind
+ __ Ldr(x10, FieldMemOperand(constructor,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Retrieve elements_kind from map.
+ __ LoadElementsKindFromMap(kind, x10);
+
+ if (FLAG_debug_code) {
+ Label done;
+ __ Cmp(x3, FAST_ELEMENTS);
+ __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ }
+
+ Label fast_elements_case;
+ __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+ __ Bind(&fast_elements_case);
+ GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : callee
+ // -- x4 : call_data
+ // -- x2 : holder
+ // -- x1 : api_function_address
+ // -- cp : context
+ // --
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+ Register context = cp;
+
+ int argc = ArgumentBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
+ bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ Isolate* isolate = masm->isolate();
+
+ // FunctionCallbackArguments: context, callee and call data.
+ __ Push(context, callee, call_data);
+
+ // Load context from callee
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
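+ // The call_data register is reused below to hold undefined for the return
+ // value slots: if call_data was already undefined the register still holds
+ // it, otherwise load the undefined root explicitly.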
+ if (!call_data_undefined) {
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ }
+ Register isolate_reg = x5;
+ __ Mov(isolate_reg, ExternalReference::isolate_address(isolate));
+
+ // FunctionCallbackArguments:
+ // return value, return value default, isolate, holder.
+ __ Push(call_data, call_data, isolate_reg, holder);
+
+ // Prepare arguments.
+ Register args = x6;
+ __ Mov(args, masm->StackPointer());
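+ // 'args' now points at the holder slot (FCA::kHolderIndex == 0), the base
+ // of the FunctionCallbackArguments array just pushed; it is stored below as
+ // FunctionCallbackInfo::implicit_args_.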
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space, since
+ // it's not controlled by GC.
+ const int kApiStackSpace = 4;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ ASSERT(!AreAliased(x0, api_function_address));
+ // x0 = FunctionCallbackInfo&
+ // Arguments is after the return address.
+ __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
+ // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
+ __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+ __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
+ // FunctionCallbackInfo::length_ = argc and
+ // FunctionCallbackInfo::is_construct_call = 0
+ __ Mov(x10, argc);
+ __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+
+ const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ return_value_operand,
+ &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : name
+ // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- ...
+ // -- x2 : api_function_address
+ // -----------------------------------
+
+ Register api_function_address = x2;
+
+ __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+
+ const int kApiStackSpace = 1;
+
+ // Allocate space so that CallApiFunctionAndReturn can store some scratch
+ // registers on the stack.
+ const int kCallApiFunctionSpillSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
+
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
+ // x1 (internal::Object** args_) as the data.
+ __ Poke(x1, 1 * kPointerSize);
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
+
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type =
+ ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
+ masm->isolate());
+
+ const int spill_offset = 1 + kApiStackSpace;
+ __ CallApiFunctionAndReturn(api_function_address,
+ thunk_ref,
+ kStackUnwindSpace,
+ spill_offset,
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
new file mode 100644
index 000000000..7e09ffa57
--- /dev/null
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -0,0 +1,500 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
+#define V8_ARM64_CODE_STUBS_ARM64_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StoreBufferOverflowStub: public PlatformCodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // TODO(all): These don't seem to be used any more. Delete them.
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
+
+ static Register to_be_pushed_lr() { return ip0; }
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return StoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+ explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
+ : save_doubles_(with_fp) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+ Major MajorKey() { return RestoreRegistersState; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+ SaveFPRegsMode save_doubles_;
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static Mode GetMode(Code* stub) {
+ // Find the mode depending on the first two instructions.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
+
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+ return INCREMENTAL;
+ }
+
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+
+ if (instr2->IsUncondBranchImm()) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(instr2->IsPCRelAddressing());
+
+ return STORE_BUFFER_ONLY;
+ }
+
+  // We patch the first two instructions of the stub back and forth between an
+ // adr and branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
+ static void Patch(Code* stub, Mode mode) {
+    // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ }
+
+ private:
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+      // The SaveCallerSaveRegisters method needs to save caller-saved
+      // registers, but we don't bother saving ip0 and ip1 because they are
+      // used as scratch registers by the MacroAssembler.
+ saved_regs_.Remove(ip0);
+ saved_regs_.Remove(ip1);
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->Push(scratch1_, scratch2_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->Pop(scratch2_, scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
+ if (mode == kSaveFPRegs) {
+ masm->PushCPURegList(kCallerSavedFP);
+ }
+ }
+
+ void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ if (mode == kSaveFPRegs) {
+ masm->PopCPURegList(kCallerSavedFP);
+ }
+ masm->PopCPURegList(saved_regs_);
+ }
+
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers without those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+      // reserved for the following purposes:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+      //  - x30 link register (lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(ip0);
+ list.Remove(ip1);
+ list.Remove(x8);
+ list.Remove(x9);
+
+ return list;
+ }
+
+ friend class RecordWriteStub;
+ };
+
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ Handle<Name> name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2);
+
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+
+ Major MajorKey() { return NameDictionaryLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
+
+
+class SubStringStub: public PlatformCodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compare two flat ASCII strings for equality and returns result
+ // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
+struct PlatformCallInterfaceDescriptor {
+ explicit PlatformCallInterfaceDescriptor(
+ TargetAddressStorageMode storage_mode)
+ : storage_mode_(storage_mode) { }
+
+ TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+ TargetAddressStorageMode storage_mode_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
new file mode 100644
index 000000000..831d44986
--- /dev/null
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -0,0 +1,615 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "macro-assembler.h"
+#include "simulator-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm64_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ Simulator * simulator = Simulator::current(Isolate::Current());
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(x),
+ Simulator::CallArgument::End()
+ };
+ return simulator->CallDouble(fast_exp_arm64_machine_code, args);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &std::exp;
+
+  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
+  // create an AAPCS64-compliant exp() function. This will be faster than the C
+ // library's exp() function, but probably less accurate.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &std::exp;
+
+ ExternalReference::InitializeMathExpData();
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ masm.SetStackPointer(csp);
+
+ // The argument will be in d0 on entry.
+ DoubleRegister input = d0;
+ // Use other caller-saved registers for all other values.
+ DoubleRegister result = d1;
+ DoubleRegister double_temp1 = d2;
+ DoubleRegister double_temp2 = d3;
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Register temp3 = x12;
+
+ MathExpGenerator::EmitMathExp(&masm, input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+ // Move the result to the return register.
+ masm.Fmov(d0, result);
+ masm.Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm64_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ return &std::sqrt;
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_memento_found) {
+ // ----------- S t a t e -------------
+ // -- x2 : receiver
+ // -- x3 : target map
+ // -----------------------------------
+ Register receiver = x2;
+ Register map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_memento_found != NULL);
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
+ allocation_memento_found);
+ }
+
+ // Set transitioned map.
+ __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver,
+ HeapObject::kMapOffset,
+ map,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- x3 : target map, scratch for subsequent call
+ // -----------------------------------
+ Register receiver = x2;
+ Register target_map = x3;
+
+ Label gc_required, only_change_map;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedDoubleArray.
+ Register array_size = x6;
+ Register array = x7;
+ __ Lsl(array_size, length, kDoubleSizeLog2);
+ __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
+ __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
+  // Register array is a non-tagged heap object.
+
+ // Set the destination FixedDoubleArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Replace receiver's backing store with newly created FixedDoubleArray.
+ __ Add(x10, array, kHeapObjectTag);
+ __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
+ x6, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
+
+ FPRegister nan_d = d1;
+ __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
+
+ Label entry, done;
+ __ B(&entry);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ B(&done);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(lr);
+ __ B(fail);
+
+  // Iterate over the array, copying and converting smis to doubles. If an
+ // element is non-smi, write a hole to the destination.
+ {
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
+ __ Tst(x13, kSmiTagMask);
+ __ Fcsel(d0, d0, nan_d, eq);
+ __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(lr);
+ __ Bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+ ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -- x3 : target map, scratch for subsequent call
+ // -- x4 : scratch (elements)
+ // -----------------------------------
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register target_map = x3;
+
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
+ }
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ Label only_change_map;
+ Register elements = x4;
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
+
+ __ Push(lr);
+ // TODO(all): These registers may not need to be pushed. Examine
+ // RecordWriteStub and check whether it's needed.
+ __ Push(target_map, receiver, key, value);
+ Register length = x5;
+ __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
+ FixedArray::kLengthOffset));
+
+ // Allocate new FixedArray.
+ Register array_size = x6;
+ Register array = x7;
+ Label gc_required;
+ __ Mov(array_size, FixedDoubleArray::kHeaderSize);
+ __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
+ __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
+
+  // Set the destination FixedArray's length and map.
+ Register map_root = x6;
+ __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
+ __ SmiTag(x11, length);
+ __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+ Register src_elements = x10;
+ Register dst_elements = x11;
+ Register dst_end = x12;
+ __ Add(src_elements, elements,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
+
+ Register the_hole = x14;
+ Register heap_num_map = x15;
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
+
+ Label entry;
+ __ B(&entry);
+
+ // Call into runtime if GC is required.
+ __ Bind(&gc_required);
+ __ Pop(value, key, receiver, target_map);
+ __ Pop(lr);
+ __ B(fail);
+
+ {
+ Label loop, convert_hole;
+ __ Bind(&loop);
+ __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
+ __ Cmp(x13, kHoleNanInt64);
+ __ B(eq, &convert_hole);
+
+ // Non-hole double, copy value into a heap number.
+ Register heap_num = x5;
+ __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
+ __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+ __ Mov(x13, dst_elements);
+ __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
+ __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ B(&entry);
+
+ // Replace the-hole NaN with the-hole pointer.
+ __ Bind(&convert_hole);
+ __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
+
+ __ Bind(&entry);
+ __ Cmp(dst_elements, dst_end);
+ __ B(lt, &loop);
+ }
+
+ __ Pop(value, key, receiver, target_map);
+ // Replace receiver's backing store with newly created and filled FixedArray.
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Pop(lr);
+
+ __ Bind(&only_change_map);
+ __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ return MacroAssembler::IsYoungSequence(sequence);
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAgeCodeAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
+ if (age == kNoAgeCodeAge) {
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ } else {
+ Code * stub = GetCodeAgeStub(isolate, age, parity);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
+ }
+}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+ // Fetch the instance type of the receiver into result register.
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ Ldr(result.W(),
+ UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Add(index, index, result.W());
+ __ B(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ Bind(&cons_string);
+ __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
+ // Get the first of the two strings and load its instance type.
+ __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ Bind(&indirect_string_loaded);
+ __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ Bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
+
+  // Prepare sequential strings.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ B(&check_encoding);
+
+ // Handle external strings.
+ __ Bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ Tst(result, kIsIndirectStringMask);
+ __ Assert(eq, kExternalStringExpectedButNotFound);
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
+ // can be bound far away in deferred code.
+ __ Tst(result, kShortExternalStringMask);
+ __ B(ne, call_runtime);
+ __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ Bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+ // Two-byte string.
+ __ Ldrh(result, MemOperand(string, index, SXTW, 1));
+ __ B(&done);
+ __ Bind(&ascii);
+ // Ascii string.
+ __ Ldrb(result, MemOperand(string, index, SXTW));
+ __ Bind(&done);
+}
+
+
+static MemOperand ExpConstant(Register base, int index) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_temp1,
+ DoubleRegister double_temp2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ // TODO(jbramley): There are several instances where fnmsub could be used
+ // instead of fmul and fsub. Doing this changes the result, but since this is
+ // an estimation anyway, does it matter?
+
+ ASSERT(!AreAliased(input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+ DoubleRegister double_temp3 = result;
+ Register constants = temp3;
+
+ // The algorithm used relies on some magic constants which are initialized in
+ // ExternalReference::InitializeMathExpData().
+
+ // Load the address of the start of the array.
+ __ Mov(constants, ExternalReference::math_exp_constants(0));
+
+ // We have to do a four-way split here:
+ // - If input <= about -708.4, the output always rounds to zero.
+ // - If input >= about 709.8, the output always rounds to +infinity.
+ // - If the input is NaN, the output is NaN.
+ // - Otherwise, the result needs to be calculated.
+ Label result_is_finite_non_zero;
+ // Assert that we can load offset 0 (the small input threshold) and offset 1
+ // (the large input threshold) with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+ ExpConstant(constants, 0).offset()));
+ __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
+
+ __ Fcmp(input, double_temp1);
+ __ Fccmp(input, double_temp2, NoFlag, hi);
+ // At this point, the condition flags can be in one of five states:
+ // NZCV
+ // 1000 -708.4 < input < 709.8 result = exp(input)
+ // 0110 input == 709.8 result = +infinity
+ // 0010 input > 709.8 result = +infinity
+ // 0011 input is NaN result = input
+ // 0000 input <= -708.4 result = +0.0
+
+ // Continue the common case first. 'mi' tests N == 1.
+ __ B(&result_is_finite_non_zero, mi);
+
+ // TODO(jbramley): Consider adding a +infinity register for ARM64.
+ __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
+
+ // Select between +0.0 and +infinity. 'lo' tests C == 0.
+ __ Fcsel(result, fp_zero, double_temp2, lo);
+ // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
+ __ Fcsel(result, result, input, vc);
+ __ B(&done);
+
+ // The rest is magic, as described in InitializeMathExpData().
+ __ Bind(&result_is_finite_non_zero);
+
+ // Assert that we can load offset 3 and offset 4 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+ ExpConstant(constants, 3).offset()));
+ __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
+ __ Fmadd(double_temp1, double_temp1, input, double_temp3);
+ __ Fmov(temp2.W(), double_temp1.S());
+ __ Fsub(double_temp1, double_temp1, double_temp3);
+
+ // Assert that we can load offset 5 and offset 6 with a single ldp.
+ ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+ ExpConstant(constants, 5).offset()));
+ __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp1, double_temp1, double_temp2);
+ __ Fsub(double_temp1, double_temp1, input);
+
+ __ Fmul(double_temp2, double_temp1, double_temp1);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+
+ __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
+
+ __ Ldr(double_temp2, ExpConstant(constants, 7));
+ // TODO(jbramley): Consider using Fnmsub here.
+ __ Fmul(double_temp3, double_temp3, double_temp2);
+ __ Fsub(double_temp3, double_temp3, double_temp1);
+
+ // The 8th constant is 1.0, so use an immediate move rather than a load.
+ // We can't generate a runtime assertion here as we would need to call Abort
+ // in the runtime and we don't have an Isolate when we generate this code.
+ __ Fmov(double_temp2, 1.0);
+ __ Fadd(double_temp3, double_temp3, double_temp2);
+
+ __ And(temp2, temp2, 0x7ff);
+ __ Add(temp1, temp1, 0x3ff);
+
+ // Do the final table lookup.
+ __ Mov(temp3, ExternalReference::math_exp_log_table());
+
+ __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
+ __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
+ __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
+ __ Bfi(temp2, temp1, 32, 32);
+ __ Fmov(double_temp1, temp2);
+
+ __ Fmul(result, double_temp3, double_temp1);
+
+ __ Bind(&done);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
new file mode 100644
index 000000000..4d8a9a85a
--- /dev/null
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CODEGEN_ARM64_H_
+#define V8_ARM64_CODEGEN_ARM64_H_
+
+#include "ast.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output. Register index is asserted to be a 32-bit W
+ // register.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
new file mode 100644
index 000000000..8866e23cf
--- /dev/null
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -0,0 +1,1271 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CONSTANTS_ARM64_H_
+#define V8_ARM64_CONSTANTS_ARM64_H_
+
+
+// Assert that this is an LP64 system.
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+
+
+// Get the standard printf format macros for C99 stdint types.
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+
+namespace v8 {
+namespace internal {
+
+
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MB;
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfFPRegisters = 32;
+// Callee saved registers are x19-x30 (lr).
+const int kNumberOfCalleeSavedRegisters = 11;
+const int kFirstCalleeSavedRegisterIndex = 19;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+// Callee saved registers with no specific purpose in JS are x19-x25.
+const unsigned kJSCalleeSavedRegList = 0x03f80000;
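+// (0x03f80000 has bits 19 through 25 set, one bit per register x19-x25.)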
+// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
+const unsigned kWRegSizeInBits = 32;
+const unsigned kWRegSizeInBitsLog2 = 5;
+const unsigned kWRegSize = kWRegSizeInBits >> 3;
+const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
+const unsigned kXRegSizeInBits = 64;
+const unsigned kXRegSizeInBitsLog2 = 6;
+const unsigned kXRegSize = kXRegSizeInBits >> 3;
+const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
+const unsigned kSRegSizeInBits = 32;
+const unsigned kSRegSizeInBitsLog2 = 5;
+const unsigned kSRegSize = kSRegSizeInBits >> 3;
+const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
+const unsigned kDRegSizeInBits = 64;
+const unsigned kDRegSizeInBitsLog2 = 6;
+const unsigned kDRegSize = kDRegSizeInBits >> 3;
+const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int64_t kWRegMask = 0x00000000ffffffffL;
+const int64_t kXRegMask = 0xffffffffffffffffL;
+const int64_t kSRegMask = 0x00000000ffffffffL;
+const int64_t kDRegMask = 0xffffffffffffffffL;
+// TODO(all) check if the expression below works on all compilers or if it
+// triggers an overflow error.
+const int64_t kDSignBit = 63;
+const int64_t kDSignMask = 0x1L << kDSignBit;
+const int64_t kSSignBit = 31;
+const int64_t kSSignMask = 0x1L << kSSignBit;
+const int64_t kXSignBit = 63;
+const int64_t kXSignMask = 0x1L << kXSignBit;
+const int64_t kWSignBit = 31;
+const int64_t kWSignMask = 0x1L << kWSignBit;
+const int64_t kDQuietNanBit = 51;
+const int64_t kDQuietNanMask = 0x1L << kDQuietNanBit;
+const int64_t kSQuietNanBit = 22;
+const int64_t kSQuietNanMask = 0x1L << kSQuietNanBit;
+const int64_t kByteMask = 0xffL;
+const int64_t kHalfWordMask = 0xffffL;
+const int64_t kWordMask = 0xffffffffL;
+const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
+const uint64_t kWMaxUInt = 0xffffffffUL;
+const int64_t kXMaxInt = 0x7fffffffffffffffL;
+const int64_t kXMinInt = 0x8000000000000000L;
+const int32_t kWMaxInt = 0x7fffffff;
+const int32_t kWMinInt = 0x80000000;
+const unsigned kFramePointerRegCode = 29;
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kJSSPCode = 28;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+// Standard machine types defined by AAPCS64.
+const unsigned kByteSize = 8;
+const unsigned kByteSizeInBytes = kByteSize >> 3;
+const unsigned kHalfWordSize = 16;
+const unsigned kHalfWordSizeLog2 = 4;
+const unsigned kHalfWordSizeInBytes = kHalfWordSize >> 3;
+const unsigned kHalfWordSizeInBytesLog2 = kHalfWordSizeLog2 - 3;
+const unsigned kWordSize = 32;
+const unsigned kWordSizeLog2 = 5;
+const unsigned kWordSizeInBytes = kWordSize >> 3;
+const unsigned kWordSizeInBytesLog2 = kWordSizeLog2 - 3;
+const unsigned kDoubleWordSize = 64;
+const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3;
+const unsigned kQuadWordSize = 128;
+const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+V_(PrefetchMode, 4, 0, Bits) \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */ \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(SizeLS, 31, 30, Bits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits, uint32_t) \
+V_(N, 31, 31, Bits, bool) \
+V_(Z, 30, 30, Bits, bool) \
+V_(C, 29, 29, Bits, bool) \
+V_(V, 28, 28, Bits, uint32_t) \
+M_(NZCV, Flags_mask) \
+ \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits, bool) \
+V_(DN, 25, 25, Bits, bool) \
+V_(FZ, 24, 24, Bits, bool) \
+V_(RMode, 23, 22, Bits, FPRounding) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+
+// Fields offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2) \
+ const int Name##_offset = LowBit; \
+ const int Name##_width = HighBit - LowBit + 1; \
+ const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
+#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
+ DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+#undef DECLARE_INSTRUCTION_FIELDS_OFFSETS
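+
+// As an illustration, the Rd field declared above (bits 4:0) expands to:
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = 0x0000001f;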
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0,
+ ne = 1,
+ hs = 2,
+ lo = 3,
+ mi = 4,
+ pl = 5,
+ vs = 6,
+ vc = 7,
+ hi = 8,
+ ls = 9,
+ ge = 10,
+ lt = 11,
+ gt = 12,
+ le = 13,
+ al = 14,
+ nv = 15 // Behaves as always/al.
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no never condition.
+ ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
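+
+// With the encoding above, inverting a condition flips its lowest bit, so for
+// example InvertCondition(eq) is ne and InvertCondition(hs) is lo.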
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseConditionForCmp(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ case eq:
+ return eq;
+ default:
+ // In practice this function is only used with a condition coming from
+ // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
+      // invalid as it doesn't necessarily make sense to reverse it (consider
+ // 'mi' for instance).
+ UNREACHABLE();
+ return nv;
+ };
+}
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) |
+ (0x3 << SysOp1_offset) |
+ (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) |
+ (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended).
+const int kSFOffset = 31;
+enum AddSubOp {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
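+
+// For instance, the ADD_SUB_IMMEDIATE expansion above defines
+// ADD_w_imm = 0x11000000 and ADD_x_imm = 0x91000000 (the x variant also sets
+// the SixtyFourBits bit).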
+
+enum AddSubShiftedOp {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+
+// Logical (immediate and shifted register).
+enum LogicalOp {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+ // Bitfield N field.
+};
+
+// Extract.
+enum ExtractOp {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+};
+
+// Compare and branch.
+enum CompareBranchOp {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
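+//
+// Illustrative note (an assumption, not part of the original patch): the
+// *Fixed/*FMask pairs throughout this file are laid out so that an
+// instruction word can be classified with (instr & FMask) == Fixed, while
+// the wider *Mask values cover the bits that distinguish individual ops.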
+
+enum SystemOp {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
+
+enum SystemSysRegOp {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemHintOp {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+// Code used to spot hlt instructions that should not be hit.
+const int kHltBadCode = 0xbad;
+
+enum MemBarrierOp {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+// Any load or store (including pair).
+enum LoadStoreAnyOp {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000)
+
+// Load/store pair (post, pre and offset).
+enum LoadStorePairOp {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
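+
+// For illustration (not part of the original source): the X-macro list above
+// expands inside LoadStorePairOp to entries such as STP_w = 0x00000000,
+// LDP_w = 0x00400000 and STP_x = 0x80000000.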
+
+enum LoadStorePairPostIndexOp {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+};
+
+// Load literal.
+enum LoadLiteralOp {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000)
+
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store (post, pre, offset and unsigned).
+enum LoadStoreOp {
+ LoadStoreOpMask = 0xC4C00000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store post index.
+enum LoadStorePostIndex {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+// Conditional compare.
+enum ConditionalCompareOp {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Unimplemented and unallocated instructions. These are defined to make fixed
+// bit assertion easier.
+enum UnimplementedOp {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
new file mode 100644
index 000000000..b8899adb3
--- /dev/null
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -0,0 +1,199 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU specific code for arm64 independent of OS goes here.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/cpu-arm64.h"
+#include "arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
+
+// Initialise to smallest possible cache size.
+unsigned CpuFeatures::dcache_line_size_ = 1;
+unsigned CpuFeatures::icache_line_size_ = 1;
+
+
+void CPU::SetUp() {
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return true;
+}
+
+
+void CPU::FlushICache(void* address, size_t length) {
+ if (length == 0) {
+ return;
+ }
+
+#ifdef USE_SIMULATOR
+ // TODO(all): consider doing some cache simulation to ensure every address
+ // run has been synced.
+ USE(address);
+ USE(length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
+ uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
+ // Cache line sizes are always a power of 2.
+ ASSERT(CountSetBits(dsize, 64) == 1);
+ ASSERT(CountSetBits(isize, 64) == 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
+ uintptr_t end = start + length;
+
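+  // Worked example (illustrative only, assuming a 64-byte D-cache line):
+  // flushing 4 bytes at address 0x1004 gives dstart = 0x1004 & ~63 = 0x1000
+  // and end = 0x1008, so the loop below cleans exactly one D-cache line.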
+ __asm__ __volatile__ ( // NOLINT
+ // Clean every line of the D cache containing the target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // va : by (Virtual) Address
+ // u : to the point of Unification
+ // The point of unification for a processor is the point by which the
+ // instruction and data caches are guaranteed to see the same copy of a
+ // memory location. See ARM DDI 0406B page B2-12 for more information.
+ "dc cvau, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the rest
+ // of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r" (dstart),
+ [iline] "+r" (istart)
+ : [dsize] "r" (dsize),
+ [isize] "r" (isize),
+ [end] "r" (end)
+ // This code does not write to memory but without the dependency gcc might
+ // move this code before the code is generated.
+ : "cc", "memory"
+ ); // NOLINT
+#endif
+}
+
+
+void CpuFeatures::Probe() {
+ // Compute I and D cache line size. The cache type register holds
+ // information about the caches.
+ uint32_t cache_type_register = GetCacheType();
+
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+ // The cache type register holds the size of the I and D caches as a power of
+ // two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 1 << dcache_line_size_power_of_two;
+ icache_line_size_ = 1 << icache_line_size_power_of_two;
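+  // Illustrative example (not part of the original change): if bits [19:16]
+  // of the cache type register hold 4, dcache_line_size_ becomes 1 << 4 = 16.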
+
+ // AArch64 has no configuration options, no further probing is required.
+ supported_ = 0;
+
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+}
+
+
+unsigned CpuFeatures::dcache_line_size() {
+ ASSERT(initialized_);
+ return dcache_line_size_;
+}
+
+
+unsigned CpuFeatures::icache_line_size() {
+ ASSERT(initialized_);
+ return icache_line_size_;
+}
+
+
+uint32_t CpuFeatures::GetCacheType() {
+#ifdef USE_SIMULATOR
+ // This will lead to a cache with 1 byte long lines, which is fine since the
+ // simulator will not need this information.
+ return 0;
+#else
+ uint32_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ return cache_type_register;
+#endif
+}
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/cpu-arm64.h b/deps/v8/src/arm64/cpu-arm64.h
new file mode 100644
index 000000000..ddec72d8f
--- /dev/null
+++ b/deps/v8/src/arm64/cpu-arm64.h
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_CPU_ARM64_H_
+#define V8_ARM64_CPU_ARM64_H_
+
+#include <stdio.h>
+#include "serialize.h"
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a CpuFeatureScope before use.
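+//
+// A minimal usage sketch (an assumption for illustration; CpuFeatureScope is
+// declared in the shared assembler code, not in this file):
+//   if (CpuFeatures::IsSupported(f)) {
+//     CpuFeatureScope scope(masm, f);
+//     // Emit code that relies on feature f here.
+//   }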
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for ARM64.
+ return false;
+ };
+
+ static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
+ ASSERT(initialized_);
+ // There are no optional features for ARM64.
+ return false;
+ }
+
+ static bool IsSafeForSnapshot(CpuFeature f) {
+ return (IsSupported(f) &&
+ (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ }
+
+ // I and D cache line size in bytes.
+ static unsigned dcache_line_size();
+ static unsigned icache_line_size();
+
+ static unsigned supported_;
+
+ static bool VerifyCrossCompiling() {
+ // There are no optional features for ARM64.
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ // There are no optional features for ARM64.
+ USE(f);
+ ASSERT(cross_compile_ == 0);
+ return true;
+ }
+
+ private:
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+
+ // This isn't used (and is always 0), but it is required by V8.
+ static unsigned found_by_runtime_probing_only_;
+
+ static unsigned cross_compile_;
+
+ friend class PlatformFeatureScope;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_CPU_ARM64_H_
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
new file mode 100644
index 000000000..716337f05
--- /dev/null
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -0,0 +1,393 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "codegen.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
+ // the return from JS function sequence from
+ // mov sp, fp
+  //   ldp fp, lr, [sp], #16
+  //   ldr ip0, [pc, #(3 * kInstructionSize)]
+ // add sp, sp, ip0
+ // ret
+  //   <number of parameters ...
+ // ... plus one (64 bits)>
+ // to a call to the debug break return code.
+ // ldr ip0, [pc, #(3 * kInstructionSize)]
+ // blr ip0
+ // hlt kHltBadCode @ code should not return, catch if it does.
+ // <debug break return code ...
+ // ... entry point address (64 bits)>
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
+
+ // The first instruction of a patched return sequence must be a load literal
+ // loading the address of the debug break return code.
+ patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break return code will push a frame and call statically compiled
+ // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break return code.
+ patcher.blr(ip0);
+ patcher.hlt(kHltBadCode);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ // Reset the code emitted by EmitReturnSequence to its original state.
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSRetSequenceInstructions);
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ // Patch the code emitted by Debug::GenerateSlots, changing the debug break
+ // slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // <debug break slot code ...
+ // ... entry point address (64 bits)>
+
+ // TODO(all): consider adding a hlt instruction after the blr as we don't
+ // expect control to return here. This implies increasing
+ // kDebugBreakSlotInstructions to 5 instructions.
+
+ // The patching code must not overflow the space occupied by the return
+ // sequence.
+ STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ byte* entry =
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
+
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+  // code. By using blr, even though control will not return after the branch,
+ // this call site will be registered in the frame (lr being saved as the pc
+ // of the next instruction to execute for this frame). The debugger can now
+  // iterate over the frames to find the call to the debug break slot code.
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<int64_t>(entry));
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs,
+ Register scratch) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Any live values (object_regs and non_object_regs) in caller-saved
+ // registers (or lr) need to be stored on the stack so that their values are
+ // safely preserved for a call into C code.
+ //
+ // Also:
+ // * object_regs may be modified during the C code by the garbage
+ // collector. Every object register must be a valid tagged pointer or
+ // SMI.
+ //
+ // * non_object_regs will be converted to SMIs so that the garbage
+ // collector doesn't try to interpret them as pointers.
+ //
+ // TODO(jbramley): Why can't this handle callee-saved registers?
+ ASSERT((~kCallerSaved.list() & object_regs) == 0);
+ ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ ASSERT((scratch.Bit() & object_regs) == 0);
+ ASSERT((scratch.Bit() & non_object_regs) == 0);
+ ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
+ STATIC_ASSERT(kSmiValueSize == 32);
+
+ CPURegList non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Store each non-object register as two SMIs.
+ Register reg = Register(non_object_list.PopLowestIndex());
+ __ Push(reg);
+ __ Poke(wzr, 0);
+ __ Push(reg.W(), wzr);
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+ }
+
+ if (object_regs != 0) {
+ __ PushXRegList(object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the register values from the expression stack.
+ if (object_regs != 0) {
+ __ PopXRegList(object_regs);
+ }
+
+ non_object_list =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
+ while (!non_object_list.IsEmpty()) {
+ // Load each non-object register from two SMIs.
+ // Stack:
+ // jssp[12]: reg[63:32]
+ // jssp[8]: 0x00000000 (SMI tag & padding)
+ // jssp[4]: reg[31:0]
+ // jssp[0]: 0x00000000 (SMI tag & padding)
+ Register reg = Register(non_object_list.PopHighestIndex());
+ __ Pop(scratch, reg);
+ __ Bfxil(reg, scratch, 32, 32);
+ }
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
+ masm->isolate());
+ __ Mov(scratch, after_break_target);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers x0 and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ // Registers x0, x1, and x2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+  //  -- x0    : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC call (from ic-arm.cc)
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that x0 is TOS, which
+  // is an object. This is not generally the case, so this should be used with
+  // care.
+ Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- x2 : feedback array
+ // -- x3 : slot in feedback array
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments (not smi)
+ // -- x1 : constructor function
+ // -- x2 : feedback array
+ // -- x3 : feedback slot (smi)
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(
+ masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0, x10);
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+}
+
+const bool Debug::kFrameDropperSupported = false;
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
new file mode 100644
index 000000000..94009c704
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -0,0 +1,671 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DECODER_ARM64_INL_H_
+#define V8_ARM64_DECODER_ARM64_INL_H_
+
+#include "arm64/decoder-arm64.h"
+#include "globals.h"
+#include "utils.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Top-level instruction decode function.
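+// For example (illustrative, based on the encodings in constants-arm64.h): an
+// add/sub immediate instruction has the fixed pattern 0x11000000, so
+// Bits(27, 24) is 0x1 and it is dispatched to DecodeAddSubImmediate below.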
+template<typename V>
+void Decoder<V>::Decode(Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ ASSERT(instr->Bit(28) == 0x1);
+ V::VisitPCRelAddressing(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ V::VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ V::VisitCompareBranch(instr);
+ } else {
+ V::VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalBranch(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ (instr->Mask(0x003FF200) == 0x00032200) ||
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitSystem(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ V::VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLoadStore(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ // TODO(all): VisitLoadStoreExclusive.
+ V::VisitUnimplemented(instr);
+ } else {
+ DecodeAdvSIMDLoadStore(instr);
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ V::VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ V::VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
+ V::VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ V::VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLoadStorePairOffset(instr);
+ } else {
+ V::VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeLogical(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ V::VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitExtract(instr);
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
+ ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubImmediate(instr);
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB) );
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ V::VisitConditionalCompareRegister(instr);
+ } else {
+ V::VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing1Source(instr);
+ }
+ }
+ break;
+ }
+ }
+ case 1:
+ case 3:
+ case 5:
+ case 7: V::VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeFP(Instruction* instr) {
+ ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF) );
+
+ if (instr->Bit(28) == 0) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 1) {
+ V::VisitUnallocated(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeAdvSIMDDataProcessing(instr);
+ } else {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ V::VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ V::VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ V::VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ V::VisitUnallocated(instr);
+ } else {
+ V::VisitFPDataProcessing3Source(instr);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD load/store instruction decode.
+ ASSERT(instr->Bits(29, 25) == 0x6);
+ V::VisitUnimplemented(instr);
+}
+
+
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+ // TODO(all): Implement Advanced SIMD data processing instruction decode.
+ ASSERT(instr->Bits(27, 25) == 0x7);
+ V::VisitUnimplemented(instr);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_INL_H_
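
A note on the shape of the code above: every path through the templated decode tree ends in a direct V::VisitSomething(instr) call, so when Decoder<V> is instantiated with a concrete visitor class the dispatch is resolved statically rather than through the DispatchingDecoderVisitor list. The stand-alone sketch below mirrors only that pattern, with an invented one-byte "instruction" and invented visitor hooks; none of these names exist in V8.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for v8::internal::Instruction.
struct ToyInstruction {
  uint8_t bits;
  int Bit(int pos) const { return (bits >> pos) & 1; }
};

// The visitor supplies the leaf handlers.
class PrintingVisitor {
 public:
  void VisitBranch(ToyInstruction* instr) {
    std::printf("branch       0x%02x\n", instr->bits);
  }
  void VisitLoadStore(ToyInstruction* instr) {
    std::printf("load/store   0x%02x\n", instr->bits);
  }
  void VisitUnallocated(ToyInstruction* instr) {
    std::printf("unallocated  0x%02x\n", instr->bits);
  }
};

// The decoder inherits from the visitor and calls the handlers directly,
// mirroring how Decoder<V> terminates its decode tree in V::Visit* calls.
template <typename V>
class ToyDecoder : public V {
 public:
  void Decode(ToyInstruction* instr) {
    if (instr->Bit(7) == 0) {
      V::VisitBranch(instr);
    } else if (instr->Bit(6) == 0) {
      V::VisitLoadStore(instr);
    } else {
      V::VisitUnallocated(instr);
    }
  }
};

int main() {
  ToyDecoder<PrintingVisitor> decoder;
  ToyInstruction insns[] = {{0x12}, {0x81}, {0xC3}};
  for (ToyInstruction& i : insns) decoder.Decode(&i);
  return 0;
}
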
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
new file mode 100644
index 000000000..a9829f0ab
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/decoder-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
+ visitors_.remove(new_visitor);
+  visitors_.push_front(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorBefore(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.insert(it, new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::InsertVisitorAfter(
+ DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
+ visitors_.remove(new_visitor);
+ std::list<DecoderVisitor*>::iterator it;
+ for (it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ visitors_.insert(it, new_visitor);
+ return;
+ }
+ }
+ // We reached the end of the list. The last element must be
+ // registered_visitor.
+ ASSERT(*it == registered_visitor);
+ visitors_.push_back(new_visitor);
+}
+
+
+void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.remove(visitor);
+}
+
+
+#define DEFINE_VISITOR_CALLERS(A) \
+ void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
+ if (!(instr->Mask(A##FMask) == A##Fixed)) { \
+ ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ } \
+ std::list<DecoderVisitor*>::iterator it; \
+ for (it = visitors_.begin(); it != visitors_.end(); it++) { \
+ (*it)->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
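
The DEFINE_VISITOR_CALLERS block above is driven by the VISITOR_LIST macro declared in decoder-arm64.h (next file): each V(Name) entry in that list is expanded once per use, so a single list generates both the pure-virtual declarations and the forwarding definitions. Below is a small self-contained sketch of that X-macro technique, using an invented three-entry list rather than the real VISITOR_LIST.

#include <iostream>
#include <string>

// Hypothetical, much smaller stand-in for VISITOR_LIST.
#define TOY_VISITOR_LIST(V) \
  V(Branch)                 \
  V(LoadStore)              \
  V(Unallocated)

class ToyVisitor {
 public:
  virtual ~ToyVisitor() {}

  // One handler per list entry, generated from the same list that a real
  // decoder would also use to generate its dispatch code.
  #define DECLARE(A)                                 \
    virtual void Visit##A(const std::string& text) { \
      std::cout << #A << ": " << text << "\n";       \
    }
  TOY_VISITOR_LIST(DECLARE)
  #undef DECLARE
};

int main() {
  ToyVisitor v;
  v.VisitBranch("b #+0x10");
  v.VisitLoadStore("ldr x0, [x1]");
  v.VisitUnallocated(".inst 0x00000000");
  return 0;
}
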
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
new file mode 100644
index 000000000..e48f741bf
--- /dev/null
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -0,0 +1,210 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DECODER_ARM64_H_
+#define V8_ARM64_DECODER_ARM64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "arm64/instructions-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
+ V(Unimplemented)
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+};
+
+
+// A visitor that dispatches to a list of visitors.
+class DispatchingDecoderVisitor : public DecoderVisitor {
+ public:
+ DispatchingDecoderVisitor() {}
+ virtual ~DispatchingDecoderVisitor() {}
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+  // Visitors are called in the order they appear in the list.
+ // A visitor can only be registered once.
+ // Registering an already registered visitor will update its position.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);  // Move V2 to the start of the list.
+ // d.InsertVisitorBefore(V3, V2);
+ // d.AppendVisitor(V4);
+ // d.AppendVisitor(V4); // No effect.
+ //
+ // d.Decode(i);
+ //
+  // will call the visitor methods in the following order: V3, V2, V1, V4.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove a previously registered visitor class from the list of visitors
+ // stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ // Visitors are registered in a list.
+ std::list<DecoderVisitor*> visitors_;
+};
+
+
+template<typename V>
+class Decoder : public V {
+ public:
+ Decoder() {}
+ virtual ~Decoder() {}
+
+ // Top-level instruction decoder function. Decodes an instruction and calls
+ // the visitor functions registered with the Decoder class.
+ virtual void Decode(Instruction *instr);
+
+ private:
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(Instruction* instr);
+
+ // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
+ void DecodeDataProcessing(Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeAdvSIMDLoadStore(Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) data processing part of the instruction
+ // tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:25 = 0x7.
+ void DecodeAdvSIMDDataProcessing(Instruction* instr);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DECODER_ARM64_H_
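
The registration rules documented in the DispatchingDecoderVisitor comment above (append, prepend, insert-before, and re-registration moving an existing entry) can be replayed in isolation. The sketch below follows those documented semantics using plain strings in place of DecoderVisitor pointers; ToyRegistry and its methods are invented for illustration only.

#include <cassert>
#include <list>
#include <string>

// Hypothetical miniature of the visitor registry: entries live in a
// std::list, re-registering moves an entry instead of duplicating it, and
// Decode() would walk the list front to back.
class ToyRegistry {
 public:
  void Append(const std::string& v)  { order_.remove(v); order_.push_back(v); }
  void Prepend(const std::string& v) { order_.remove(v); order_.push_front(v); }
  void InsertBefore(const std::string& v, const std::string& existing) {
    order_.remove(v);
    for (auto it = order_.begin(); it != order_.end(); ++it) {
      if (*it == existing) { order_.insert(it, v); return; }
    }
    order_.push_back(v);  // existing was not found; fall back to append.
  }
  const std::list<std::string>& order() const { return order_; }

 private:
  std::list<std::string> order_;
};

int main() {
  // Replaying the sequence from the class comment above.
  ToyRegistry d;
  d.Append("V1");
  d.Append("V2");
  d.Prepend("V2");             // Move V2 to the start of the list.
  d.InsertBefore("V3", "V2");
  d.Append("V4");
  d.Append("V4");              // No effect.
  assert((d.order() == std::list<std::string>{"V3", "V2", "V1", "V4"}));
  return 0;
}
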
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
new file mode 100644
index 000000000..93cb5176d
--- /dev/null
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -0,0 +1,388 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
+ // Size of the code used to patch lazy bailout points.
+ // Patching is done by Deoptimizer::DeoptimizeFunction.
+ return 4 * kInstructionSize;
+}
+
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ // Invalidate the relocation information, as it will become invalid by the
+ // code patching below, and is not needed any more.
+ code->InvalidateRelocation();
+
+ // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
+ // entry sequence unusable (see other architectures).
+
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ Address code_start_address = code->instruction_start();
+#ifdef DEBUG
+ Address prev_call_address = NULL;
+#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
+ for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+ if (deopt_data->Pc(i)->value() == -1) continue;
+
+ Address call_address = code_start_address + deopt_data->Pc(i)->value();
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+
+ PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+ patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.blr(ip0);
+ patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
+
+ ASSERT((prev_call_address == NULL) ||
+ (call_address >= prev_call_address + patch_size()));
+ ASSERT(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+ prev_call_address = call_address;
+#endif
+ }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+ for (int i = 0; i < Register::NumRegisters(); i++) {
+ input_->SetRegister(i, 0);
+ }
+
+ // TODO(all): Do we also need to set a value to csp?
+ input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+ // There is no dynamic alignment padding on ARM64 in the input frame.
+ return false;
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+ FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->GetHandlerParameterCount();
+ output_frame->SetRegister(x0.code(), params);
+ output_frame->SetRegister(x1.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+}
+
+
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
+#define __ masm()->
+
+void Deoptimizer::EntryGenerator::Generate() {
+ GeneratePrologue();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable floating point registers.
+ CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters);
+ __ PushCPURegList(saved_fp_registers);
+
+  // We save all the registers except jssp, sp and lr.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+ saved_registers.Combine(fp);
+ __ PushCPURegList(saved_registers);
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSize) +
+ (saved_fp_registers.Count() * kDRegSize);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
+
+ // Get the bailout id from the stack.
+ Register bailout_id = x2;
+ __ Peek(bailout_id, kSavedRegistersAreaSize);
+
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta, and correct one word for bailout id.
+ __ Add(fp_to_sp, masm()->StackPointer(),
+ kSavedRegistersAreaSize + (1 * kPointerSize));
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x1, type());
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, ExternalReference::isolate_address(isolate()));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CPURegList copy_to_input = saved_registers;
+ for (int i = 0; i < saved_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ __ Peek(x2, i * kPointerSize);
+ CPURegister current_reg = copy_to_input.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Str(x2, MemOperand(x1, offset));
+ }
+
+ // Copy FP registers to the input frame.
+ for (int i = 0; i < saved_fp_registers.Count(); i++) {
+ // TODO(all): Look for opportunities to optimize this by using ldp/stp.
+ int dst_offset = FrameDescription::double_registers_offset() +
+ (i * kDoubleSize);
+ int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+ __ Peek(x2, src_offset);
+ __ Str(x2, MemOperand(x1, dst_offset));
+ }
+
+ // Remove the bailout id and the saved registers from the stack.
+ __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+ __ Add(unwind_limit, unwind_limit, __ StackPointer());
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ Label pop_loop;
+ Label pop_loop_header;
+ __ B(&pop_loop_header);
+ __ Bind(&pop_loop);
+ __ Pop(x4);
+ __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
+ __ Bind(&pop_loop_header);
+ __ Cmp(unwind_limit, __ StackPointer());
+ __ B(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(x0); // Preserve deoptimizer object across call.
+
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate()), 1);
+ }
+ __ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
+ __ B(&outer_loop_header);
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ __ Ldr(current_frame, MemOperand(x0, 0));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ B(&inner_loop_header);
+
+ __ Bind(&inner_push_loop);
+ __ Sub(x3, x3, kPointerSize);
+ __ Add(x6, current_frame, x3);
+ __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
+ __ Push(x7);
+ __ Bind(&inner_loop_header);
+ __ Cbnz(x3, &inner_push_loop);
+
+ __ Add(x0, x0, kPointerSize);
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+ !saved_fp_registers.IncludesAliasOf(fp_zero) &&
+ !saved_fp_registers.IncludesAliasOf(fp_scratch));
+ int src_offset = FrameDescription::double_registers_offset();
+ while (!saved_fp_registers.IsEmpty()) {
+ const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ __ Ldr(reg, MemOperand(x1, src_offset));
+ src_offset += kDoubleSize;
+ }
+
+ // Push state from the last output frame.
+ __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
+ __ Push(x6);
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+  // TODO(all): This code needs to be revisited. We probably don't need to
+ // restore all the registers as fullcodegen does not keep live values in
+ // registers (note that at least fp must be restored though).
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+ // later. We can use it to hold the address of last output frame while
+ // reloading the other registers.
+ ASSERT(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ // We don't need to restore x7 as it will be clobbered later to hold the
+ // continuation address.
+ Register continuation = x7;
+ saved_registers.Remove(continuation);
+
+ while (!saved_registers.IsEmpty()) {
+ // TODO(all): Look for opportunities to optimize this by using ldp.
+ CPURegister current_reg = saved_registers.PopLowestIndex();
+ int offset = (current_reg.code() * kPointerSize) +
+ FrameDescription::registers_offset();
+ __ Ldr(current_reg, MemOperand(last_output_frame, offset));
+ }
+
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+ __ InitializeRootRegister();
+ __ Br(continuation);
+}
+
+
+// Size of an entry of the second level deopt table.
+// This is the code size generated by GeneratePrologue for one entry.
+const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UseScratchRegisterScope temps(masm());
+ Register entry_id = temps.AcquireX();
+
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
+ Label done;
+ {
+ InstructionAccurateScope scope(masm());
+
+    // The number of entries will never exceed kMaxNumberOfEntries.
+    // As long as kMaxNumberOfEntries is a valid 16-bit immediate, you can use
+ // a movz instruction to load the entry id.
+ ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ movz(entry_id, i);
+ __ b(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ }
+ __ Bind(&done);
+ __ Push(entry_id);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
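
The lazy-deopt patch that PatchCodeForDeoptimization writes above is a literal load, a blr, and the 64-bit entry address stored inline, which is where patch_size() == 4 * kInstructionSize comes from. The sketch below only makes that layout arithmetic explicit; the constants are redeclared locally (the real kInstructionSize lives in the arm64 headers), so treat it as illustrative.

#include <cstdint>

constexpr int kToyInstructionSize = 4;

// ldr ip0, <literal>  -> 1 instruction
// blr ip0             -> 1 instruction
// .quad deopt_entry   -> 8 bytes, i.e. 2 instruction slots
constexpr int kLoadLiteralSlots = 1;
constexpr int kBlrSlots = 1;
constexpr int kLiteralSlots = sizeof(uint64_t) / kToyInstructionSize;

constexpr int ToyPatchSize() {
  return (kLoadLiteralSlots + kBlrSlots + kLiteralSlots) * kToyInstructionSize;
}

// Matches Deoptimizer::patch_size() == 4 * kInstructionSize (16 bytes), and
// the inline literal really does sit 2 instructions past the load, which is
// the offset passed to LoadLiteral above.
static_assert(ToyPatchSize() == 4 * kToyInstructionSize, "patch is 4 slots");
static_assert((kLoadLiteralSlots + kBlrSlots) * kToyInstructionSize == 8,
              "literal is 2 * kInstructionSize after the load");

int main() { return 0; }
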
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
new file mode 100644
index 000000000..ed3e92879
--- /dev/null
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -0,0 +1,1856 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "disasm.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HDP";
+ const char *form_cmp = "'Rn, 'Rm'HDP";
+ const char *form_neg = "'Rd, 'Rm'HDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ ASSERT((reg_size == kXRegSizeInBits) ||
+ ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+
+ // Test for movz: 16-bits set at positions 0, 16, 32 or 48.
+ if (((value & 0xffffffffffff0000UL) == 0UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0UL) ||
+ ((value & 0xffff0000ffffffffUL) == 0UL) ||
+ ((value & 0x0000ffffffffffffUL) == 0UL)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSizeInBits) &&
+ (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
+ ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
+ ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
+ ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+ return true;
+ }
+ if ((reg_size == kWRegSizeInBits) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
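
IsMovzMovnImm above is a pure bit-pattern test, so its 64-bit case can be restated compactly: a value can be materialised with a single movz when at most one aligned 16-bit halfword is nonzero, and with a single movn when the same holds for its complement. The sketch below restates only that 64-bit case with invented helper names; the W-register case in the original uses its own masks.

#include <cstdint>

// True if all bits outside one aligned 16-bit halfword are clear.
static bool FitsInOneHalfword(uint64_t value) {
  for (int shift = 0; shift < 64; shift += 16) {
    if ((value & ~(0xffffULL << shift)) == 0) return true;
  }
  return false;
}

// True if a single movz or a single movn could produce the value.
static bool IsSingleMovzOrMovn(uint64_t value) {
  return FitsInOneHalfword(value) || FitsInOneHalfword(~value);
}

int main() {
  // 0x00000000dead0000: one halfword set -> movz.
  bool movz_ok = IsSingleMovzOrMovn(0x00000000dead0000ULL);
  // 0xffffffff0123ffff: all but one halfword set -> movn.
  bool movn_ok = IsSingleMovzOrMovn(0xffffffff0123ffffULL);
  // 0x0001000000000001: two halfwords set -> needs more than one instruction.
  bool neither = IsSingleMovzOrMovn(0x0001000000000001ULL);
  return (movz_ok && movn_ok && !neither) ? 0 : 1;
}
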
+
+
+void Disassembler::VisitLogicalShifted(Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'HLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'HLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'HLo";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ // ADRP is not implemented.
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'BImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'BImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'BImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x: mnemonic = "movn"; break;
+ case MOVZ_w:
+ case MOVZ_x: mnemonic = "movz"; break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt")
+
+void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDURSB_x: form = form_x; // Fall through.
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; // Fall through.
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "4") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "8") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \
+ V(STP_s, "stp", "'St, 'St2", "4") \
+ V(LDP_s, "ldp", "'St, 'St2", "4") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "8") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
+
+void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; // Fall through.
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnimplemented(Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+ // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+void Disassembler::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
+  // TODO(mcapewel): the instruction address can't be used directly here; a
+  // base address is needed as well.
+ ASSERT(mnemonic != NULL);
+ ResetOutput();
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ buffer_[buffer_pos_++] = ' ';
+ Substitute(instr, format);
+ }
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
+int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP Register. S or D, selected by type field.
+ case 'W':
+ case 'X':
+ case 'S':
+ case 'D': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'H': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'B': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ default: {
+ UNREACHABLE();
+ return 1;
+ }
+ }
+}
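+
+// Illustrative walk-through (hypothetical format string): for "'Rd, 'IAddSub",
+// Substitute() copies characters verbatim until it meets a quote, then calls
+// SubstituteField(). The 'R' dispatch consumes "Rd" (two characters) and
+// prints the destination register, the literal ", " is copied as-is, and the
+// 'I' dispatch consumes "IAddSub" (seven characters) and prints the add/sub
+// immediate.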
+
+
+int Disassembler::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+ switch (format[1]) {
+ case 'd': reg_num = instr->Rd(); break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm': reg_num = instr->Rm(); break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 't': {
+ if (format[2] == '2') {
+ reg_num = instr->Rt2();
+ field_len = 3;
+ } else {
+ reg_num = instr->Rt();
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ char reg_type;
+ if (format[0] == 'R') {
+ // Register type is R: use sf bit to choose X and W.
+ reg_type = instr->SixtyFourBits() ? 'x' : 'w';
+ } else if (format[0] == 'F') {
+ // Floating-point register: use type field to choose S or D.
+ reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd';
+ } else {
+ // Register type is specified. Make it lower case.
+ reg_type = format[0] + 0x20;
+ }
+
+ if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) {
+ // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
+
+ // Filter special registers
+ if ((reg_type == 'x') && (reg_num == 27)) {
+ AppendToOutput("cp");
+ } else if ((reg_type == 'x') && (reg_num == 28)) {
+ AppendToOutput("jssp");
+ } else if ((reg_type == 'x') && (reg_num == 29)) {
+ AppendToOutput("fp");
+ } else if ((reg_type == 'x') && (reg_num == 30)) {
+ AppendToOutput("lr");
+ } else {
+ AppendToOutput("%c%d", reg_type, reg_num);
+ }
+ } else if (format[2] == 's') {
+ // Disassemble w31/x31 as stack pointer wcsp/csp.
+ AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_type);
+ }
+
+ return field_len;
+}
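+
+// Illustrative examples of the naming above (hypothetical operand values):
+// 'Rd with sf=1 and Rd=29 prints "fp", 'Xns with Rn=31 prints "csp" (the
+// stack-tagged form selects the stack pointer), and 'Wt with Rt=31 prints
+// "wzr" (the zero register).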
+
+
+int Disassembler::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm or IMoveLSL.
+ if (format[5] == 'I') {
+ uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+ AppendToOutput("#0x%" PRIx64, imm);
+ } else {
+ ASSERT(format[5] == 'L');
+ AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
+ }
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId64,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId64, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = format[3] - 0x30;
+ AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ AppendToOutput(", #%" PRIu64,
+ instr->ImmLSUnsigned() << instr->SizeLS());
+ }
+ return 3;
+ }
+ }
+ }
+    case 'C': { // ICondB - Immediate Conditional Branch.
+      int64_t offset = instr->ImmCondBranch() << 2;
+      char sign = '+';
+      if (offset < 0) {
+        offset = -offset;
+        sign = '-';
+      }
+      AppendToOutput("#%c0x%" PRIx64, sign, offset);
+      return 6;
+    }
+ case 'A': { // IAddSub.
+ ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFBits.
+ AppendToOutput("#%d", 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%d", instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%d", instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%x", instr->ImmException());
+ return 6;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
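+
+// Worked example for the 'ILPx case above (hypothetical values): with the
+// format "ILP8" and an ImmLSPair of 2, the scale is 8 and the printed offset
+// is ", #16"; the return value 4 tells Substitute() to skip the four format
+// characters "ILP8" that were just consumed.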
+
+
+int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
+ ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+ default: UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+ ASSERT(format[0] == 'H');
+ ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // HDP.
+ ASSERT(instr->ShiftDP() != ROR);
+ } // Fall through.
+ case 'L': { // HLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
+ USE(format);
+ ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+
+ int offset = instr->ImmPCRel();
+
+ // Only ADR (AddrPCRelByte) is supported.
+ ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+ AppendToOutput("#%c0x%x (addr %p)", sign, offset,
+ instr->InstructionAtOffset(offset, Instruction::NO_CHECK));
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "BImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: UNREACHABLE();
+ }
+ offset <<= kInstructionSizeLog2;
+ char sign = '+';
+ if (offset < 0) {
+ offset = -offset;
+ sign = '-';
+ }
+  AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset,
+                 instr->InstructionAtOffset(offset, Instruction::NO_CHECK));
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Ext", 3) == 0);
+ ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit registers
+  // are disassembled as lsl.
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%d", instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%d", instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
+ ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'P');
+ USE(format);
+
+ int prefetch_mode = instr->PrefetchMode();
+
+ const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
+  int level = ((prefetch_mode >> 1) & 3) + 1;
+ const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+
+ AppendToOutput("p%sl%d%s", ls, level, ks);
+ return 6;
+}
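+
+// Illustrative decoding (hypothetical prefetch_mode values): 0b00000 prints
+// "pldl1keep" and 0b00101 prints "pldl3strm", matching the standard
+// <type><target><policy> prefetch operation names.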
+
+int Disassembler::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
+ ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
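+
+// For example (hypothetical field values): domain 2 with type 3 indexes
+// options[2][3] and prints "ish", so a DMB with those fields disassembles as
+// "dmb ish"; the return value of 1 skips the single 'M format character.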
+
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_, format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
+ GetOutput());
+}
+
+} } // namespace v8::internal
+
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
+ if (ureg >= v8::internal::kNumberOfRegisters) {
+ return "noreg";
+ }
+ if (ureg == v8::internal::kZeroRegCode) {
+ return "xzr";
+ }
+ v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
+ return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // ARM64 does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ UNREACHABLE(); // ARM64 does not have any XMM registers
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code, so we will not try
+ // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+class BufferDisassembler : public v8::internal::Disassembler {
+ public:
+ explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
+ : out_buffer_(out_buffer) { }
+
+ ~BufferDisassembler() { }
+
+ virtual void ProcessOutput(v8::internal::Instruction* instr) {
+ v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
+ }
+
+ private:
+ v8::internal::Vector<char> out_buffer_;
+};
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instr) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ BufferDisassembler disasm(buffer);
+ decoder.AppendVisitor(&disasm);
+
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
+ return v8::internal::kInstructionSize;
+}
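+
+// A minimal usage sketch (assuming the EmbeddedVector helper from the
+// surrounding V8 code; shown for illustration only):
+//
+//   v8::internal::EmbeddedVector<char, 128> buffer;
+//   disasm::NameConverter converter;
+//   disasm::Disassembler disasm(converter);
+//   byte* pc = ...;                           // Some ARM64 code address.
+//   pc += disasm.InstructionDecode(buffer, pc);
+//   printf("%s\n", buffer.start());           // One decoded instruction.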
+
+
+int Disassembler::ConstantPoolSizeAt(byte* instr) {
+ return v8::internal::Assembler::ConstantPoolSizeAt(
+ reinterpret_cast<v8::internal::Instruction*>(instr));
+}
+
+
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+ v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
+ v8::internal::PrintDisassembler disasm(file);
+ decoder.AppendVisitor(&disasm);
+
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
+ }
+}
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
new file mode 100644
index 000000000..8c964a890
--- /dev/null
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -0,0 +1,115 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_DISASM_ARM64_H
+#define V8_ARM64_DISASM_ARM64_H
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "instructions-arm64.h"
+#include "decoder-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(Instruction* instr);
+
+ void Format(Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(Instruction* instr, const char* string);
+ int SubstituteField(Instruction* instr, const char* format);
+ int SubstituteRegisterField(Instruction* instr, const char* format);
+ int SubstituteImmediateField(Instruction* instr, const char* format);
+ int SubstituteLiteralField(Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
+ int SubstituteShiftField(Instruction* instr, const char* format);
+ int SubstituteExtendField(Instruction* instr, const char* format);
+ int SubstituteConditionField(Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
+ int SubstitutePrefetchField(Instruction* instr, const char* format);
+ int SubstituteBarrierField(Instruction* instr, const char* format);
+
+ bool RdIsZROrSP(Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...);
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+ ~PrintDisassembler() { }
+
+ virtual void ProcessOutput(Instruction* instr);
+
+ private:
+ FILE *stream_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_DISASM_ARM64_H
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
new file mode 100644
index 000000000..8c1bc20ac
--- /dev/null
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "assembler.h"
+#include "assembler-arm64.h"
+#include "assembler-arm64-inl.h"
+#include "frames.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+ UNREACHABLE();
+ return Memory::Object_at(NULL);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
new file mode 100644
index 000000000..8b5641058
--- /dev/null
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -0,0 +1,133 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm64/constants-arm64.h"
+#include "arm64/assembler-arm64.h"
+
+#ifndef V8_ARM64_FRAMES_ARM64_H_
+#define V8_ARM64_FRAMES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+
+const int kNumRegs = kNumberOfRegisters;
+// Registers x0-x17 are caller-saved.
+const int kNumJSCallerSaved = 18;
+const RegList kJSCallerSaved = 0x3ffff;
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of eight.
+// TODO(all): Refine this number.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
+#define kNumSafepointSavedRegisters \
+ CPURegList::GetSafepointSavedRegisters().Count();
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ static const int kFrameSize = 2 * kPointerSize;
+
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
+ static const int kSPOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kLastExitFrameField = kCodeOffset;
+
+ static const int kConstantPoolOffset = 0; // Not used
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+
+ // There are two words on the stack (saved fp and saved lr) between fp and
+ // the arguments.
+ static const int kLastParameterOffset = 2 * kPointerSize;
+
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kLengthOffset = -4 * kPointerSize;
+ static const int kConstructorOffset = -5 * kPointerSize;
+ static const int kImplicitReceiverOffset = -6 * kPointerSize;
+
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+ Memory::Address_at(slot) = fp;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
new file mode 100644
index 000000000..d40e74aa2
--- /dev/null
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -0,0 +1,5015 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "code-stubs.h"
+#include "codegen.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "isolate-inl.h"
+#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "arm64/code-stubs-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ if (patch_site_.is_bound()) {
+ ASSERT(info_emitted_);
+ } else {
+ ASSERT(reg_.IsNone());
+ }
+ }
+
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbz(xzr, 0, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
+ InstructionAccurateScope scope(masm_, 1);
+ ASSERT(!info_emitted_);
+ ASSERT(reg.Is64Bits());
+ ASSERT(!reg.Is(csp));
+ reg_ = reg;
+ __ bind(&patch_site_);
+ __ tbnz(xzr, 0, target); // Never taken before patched.
+ }
+
+ void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
+ UseScratchRegisterScope temps(masm_);
+ Register temp = temps.AcquireX();
+ __ Orr(temp, reg1, reg2);
+ EmitJumpIfNotSmi(temp, target);
+ }
+
+ void EmitPatchInfo() {
+ Assembler::BlockPoolsScope scope(masm_);
+ InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+ Register reg_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
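+
+// Note on the smi-check patching above: bit 0 of xzr always reads as zero, so
+// the emitted "tbz xzr, #0, target" is unconditionally taken and "tbnz" is
+// never taken. PatchInlinedSmiCode (ic-arm64.cc) later rewrites the
+// instruction to test bit 0 (the smi tag) of the recorded value register.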
+
+
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = jssp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(jssp.Is(__ StackPointer()));
+ ASSERT(scratch.Is(jssp) == (pointers == 0));
+ if (pointers != 0) {
+ __ Sub(scratch, jssp, pointers * kPointerSize);
+ }
+ __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ Bind(&ok);
+}
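+
+// For example, the call EmitStackCheck(masm_, locals_count, x10) further down
+// in Generate() verifies that locals_count pointer-sized slots can be
+// allocated without crossing the stack limit, using x10 as scratch so jssp is
+// left untouched until the check passes.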
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// - x1: the JS function object being called (i.e. ourselves).
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. See JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ Function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
+ }
+#endif
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ int receiver_offset = info->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+
+
+ // Open a frame scope to indicate that there is a frame on the stack.
+ // The MANUAL indicates that the scope shouldn't actually generate code
+ // to set up the frame because we do it manually below.
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+ // This call emits the following sequence in a way that can be patched for
+ // code ageing support:
+ // Push(lr, fp, cp, x1);
+ // Add(fp, jssp, 2 * kPointerSize);
+ info->set_prologue_offset(masm_->pc_offset());
+ __ Prologue(BUILD_FUNCTION_FRAME);
+ info->AddNoFrameRange(0, masm_->pc_offset());
+
+ // Reserve space on the stack for locals.
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
+
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, x10);
+ }
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size) {
+        __ PushMultipleTimes(x10, locals_count);
+ } else {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ Mov(x3, loop_iterations);
+ Label loop_header;
+ __ Bind(&loop_header);
+ // Do pushes.
+          __ PushMultipleTimes(x10, kMaxPushes);
+ __ Subs(x3, x3, 1);
+ __ B(ne, &loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+        __ PushMultipleTimes(x10, remaining);
+ }
+ }
+ }
+
+ bool function_in_register_x1 = true;
+
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Argument to NewContext is the function, which is still in x1.
+ Comment cmnt(masm_, "[ Allocate context");
+ if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
+ __ Push(x1, x10);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+ } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ function_in_register_x1 = false;
+ // Context is returned in x0. It replaces the context passed to us.
+ // It's saved in the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(x10, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(x10, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_x1) {
+ // Load this again, if it's used by the local context below.
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ Mov(x3, x1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(x1, Smi::FromInt(num_parameters));
+ __ Push(x3, x2, x1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (strict_mode() == STRICT) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+ }
+ ArgumentsAccessStub stub(type);
+ __ CallStub(&stub);
+
+ SetVar(arguments, x0, x1, x2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ { Comment cmnt(masm_, "[ Declarations");
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VariableDeclaration* function = scope()->function();
+ ASSERT(function->proxy()->var()->mode() == CONST ||
+ function->proxy()->var()->mode() == CONST_LEGACY);
+ ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+ VisitVariableDeclaration(function);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ EmitStackCheck(masm_);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+
+ // Force emission of the pools, so they don't get emitted in the middle
+ // of the back edge table.
+ masm()->CheckVeneerPool(true, false);
+ masm()->CheckConstPool(true, false);
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ Mov(x0, Smi::FromInt(0));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
+ __ Subs(x3, x3, Smi::FromInt(delta));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = FLAG_interrupt_budget >> 4;
+ }
+ __ Mov(x2, Operand(profiling_counter_));
+ __ Mov(x3, Smi::FromInt(reset_value));
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ ASSERT(jssp.Is(__ StackPointer()));
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ // Block literal pools whilst emitting back edge code.
+ Assembler::BlockPoolsScope block_const_pool(masm_);
+ Label ok;
+
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ int weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ EmitProfilingCounterDecrement(weight);
+ __ B(pl, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+
+ EmitProfilingCounterReset();
+
+ __ Bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
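+
+// Illustrative numbers only (hypothetical; kCodeSizeMultiplier and
+// kMaxBackEdgeWeight are defined elsewhere): a back edge 300 bytes away from
+// its target with a multiplier of 100 gives a weight of 3, so the profiling
+// counter is decremented by 3 on every iteration of that loop.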
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+
+ } else {
+ __ Bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0.
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ ASSERT(x0.Is(result_register()));
+ }
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ B(pl, &ok);
+ __ Push(x0);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(x0);
+ EmitProfilingCounterReset();
+ __ Bind(&ok);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence. This sequence can get patched when the debugger is used. See
+ // debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ {
+ InstructionAccurateScope scope(masm_,
+ Assembler::kJSRetSequenceInstructions);
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ // This code is generated using Assembler methods rather than Macro
+ // Assembler methods because it will be patched later on, and so the size
+ // of the generated code must be consistent.
+ const Register& current_sp = __ StackPointer();
+      // Nothing ensures 16-byte alignment here.
+ ASSERT(!current_sp.Is(csp));
+ __ mov(current_sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
+ // Drop the arguments and receiver and return.
+      // TODO(all): This implementation is overkill as it supports 2**31+1
+      // arguments; consider how to improve it without creating a security
+      // hole.
+ __ LoadLiteral(ip0, 3 * kInstructionSize);
+ __ add(current_sp, current_sp, ip0);
+ __ ret();
+ __ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+ // Root values have no side effects.
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ Mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ Mov(result_register(), Operand(lit));
+ __ Push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ B(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ B(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ Mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ Poke(reg, 0);
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Mov(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ Bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ Bind(materialize_true);
+ __ LoadRoot(x10, Heap::kTrueValueRootIndex);
+ __ B(&done);
+ __ Bind(materialize_false);
+ __ LoadRoot(x10, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(x10, value_root_index);
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(),
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) {
+ __ B(true_label_);
+ }
+ } else {
+ if (false_label_ != fall_through_) {
+ __ B(false_label_);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+}
+
+
+// If (cond), branch to if_true.
+// If (!cond), branch to if_false.
+// fall_through is used as an optimization in cases where only one branch
+// instruction is necessary.
+void FullCodeGenerator::Split(Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ B(cond, if_true);
+ } else if (if_true == fall_through) {
+ ASSERT(if_false != fall_through);
+ __ B(InvertCondition(cond), if_false);
+ } else {
+ __ B(cond, if_true);
+ __ B(if_false);
+ }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kXRegSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ Ldr(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+ Register src,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+ ASSERT(!AreAliased(src, scratch0, scratch1));
+ MemOperand location = VarOperand(var, scratch0);
+ __ Str(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ // scratch0 contains the correct context.
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ // TODO(all): Investigate to see if there is something to work on here.
+ Label skip;
+ if (should_normalize) {
+ __ B(&skip);
+ }
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ Bind(&skip);
+ }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (generate_debug_code_) {
+ // Check that we're not inside a with or catch context.
+ __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, StackOperand(variable));
+ }
+ break;
+
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Str(x10, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ Mov(x2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(IsDeclaredVariableMode(mode));
+ PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
+ : NONE;
+ __ Mov(x1, Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, x2, x1, x0);
+ } else {
+ // Pushing 0 (xzr) indicates no initial value.
+ __ Push(cp, x2, x1, xzr);
+ }
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(declaration->fun(), script());
+ // Check for stack overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ Str(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ x2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Function Declaration");
+ __ Mov(x2, Operand(variable->name()));
+ __ Mov(x1, Smi::FromInt(NONE));
+ __ Push(cp, x2, x1);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
+
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+
+ // Load instance object.
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
+ __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
+
+ // Assign it.
+ __ Str(x1, ContextMemOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ x1,
+ x3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse the module body.
+ Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ Mov(x11, Operand(pairs));
+ Register flags = xzr;
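+ // When the flags smi is zero, pass xzr directly instead of materializing
+ // zero in a register.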
+ if (Smi::FromInt(DeclareGlobalsFlags())) {
+ flags = x10;
+ __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
+ }
+ __ Push(cp, x11, flags);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as the final fall-through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ Bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ Peek(x1, 0); // Switch value.
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ __ B(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ __ Bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ B(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
+ __ Drop(1);
+ __ B(clause->body_target());
+ __ Bind(&skip);
+
+ __ Cbnz(x0, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ B(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ Bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ B(nested_statement.break_label());
+ } else {
+ __ B(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ Bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ Bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
+ Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+ // TODO(all): This visitor probably needs better comments and a revisit.
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. If the object is null or undefined, skip
+ // over the loop. See ECMA-262 version 5, section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
+ Register null_value = x15;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(x0, null_value);
+ __ B(eq, &exit);
+
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(x0, &convert);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ Bind(&convert);
+ __ Push(x0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(x0);
+
+ // Check for proxies.
+ Label call_runtime;
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(x0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array, no_descriptors;
+ __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
+
+ // We got a map in register x0. Get the enumeration cache from it.
+ __ Bind(&use_cache);
+
+ __ EnumLengthUntagged(x1, x0);
+ __ Cbz(x1, &no_descriptors);
+
+ __ LoadInstanceDescriptors(x0, x2);
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(x2,
+ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ Push(x0); // Map.
+ __ Mov(x0, Smi::FromInt(0));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ SmiTag(x1);
+ __ Push(x2, x1, x0);
+ __ B(&loop);
+
+ __ Bind(&no_descriptors);
+ __ Drop(1);
+ __ B(&exit);
+
+ // We got a fixed array in register x0. Iterate through that.
+ __ Bind(&fixed_array);
+
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ LoadObject(x1, FeedbackVector());
+ __ Mov(x10, Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
+
+ __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
+ __ Peek(x10, 0); // Get enumerated object.
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ // TODO(all): similar check was done already. Can we avoid it here?
+ __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Push(x1, x0); // Smi and array
+ __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
+ __ Push(x1, xzr); // Fixed array length (as smi) and initial index.
+
+ // Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ __ Bind(&loop);
+ // Load the current count to x0, load the length to x1.
+ __ PeekPair(x0, x1, 0);
+ __ Cmp(x0, x1); // Compare to the array length.
+ __ B(hs, loop_statement.break_label());
+
+ // Get the current entry of the array into register x3.
+ __ Peek(x10, 2 * kXRegSize);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
+ __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the expected map from the stack, or a smi (in the permanent slow
+ // case), into register x2.
+ __ Peek(x2, 3 * kXRegSize);
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ Peek(x1, 4 * kXRegSize);
+ __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ Cmp(x11, x2);
+ __ B(eq, &update_each);
+
+ // For proxies, no filtering is done.
+ // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Cbz(x2, &update_each);
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ Push(x1, x3);
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ Mov(x3, x0);
+ __ Cbz(x0, loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register x3.
+ __ Bind(&update_each);
+ __ Mov(result_register(), x3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ Bind(loop_statement.continue_label());
+ // TODO(all): We could use a callee saved register to avoid popping.
+ __ Pop(x0);
+ __ Add(x0, x0, Smi::FromInt(1));
+ __ Push(x0);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ B(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ Bind(loop_statement.break_label());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+ Comment cmnt(masm_, "[ ForOfStatement");
+ SetStatementPosition(stmt);
+
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // var iterator = iterable[@@iterator]()
+ VisitForAccumulatorValue(stmt->assign_iterator());
+
+ // As with for-in, skip the loop if the iterator is null or undefined.
+ Register iterator = x0;
+ __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
+ loop_statement.break_label());
+ __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
+ loop_statement.break_label());
+
+ // Convert the iterator to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(iterator, &convert);
+ __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, &done_convert);
+ __ Bind(&convert);
+ __ Push(iterator);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Bind(&done_convert);
+ __ Push(iterator);
+
+ // Loop entry.
+ __ Bind(loop_statement.continue_label());
+
+ // result = iterator.next()
+ VisitForEffect(stmt->next_result());
+
+ // if (result.done) break;
+ Label result_not_done;
+ VisitForControl(stmt->result_done(),
+ loop_statement.break_label(),
+ &result_not_done,
+ &result_not_done);
+ __ Bind(&result_not_done);
+
+ // each = result.value
+ VisitForEffect(stmt->assign_each());
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+ __ B(loop_statement.continue_label());
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ Bind(loop_statement.break_label());
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new space for
+ // nested functions that don't need literals cloning. If we're running with
+ // the --always-opt or the --prepare-always-opt flag, we need to use the
+ // runtime function so that the new function we are creating here gets a
+ // chance to have its code optimized and doesn't just get a copy of the
+ // existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
+ __ Mov(x2, Operand(info));
+ __ CallStub(&stub);
+ } else {
+ __ Mov(x11, Operand(info));
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ __ Mov(next, current);
+
+ __ Bind(&loop);
+ // Terminate at native context.
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ // Load next context in chain.
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ B(&loop);
+ __ Bind(&fast);
+ }
+
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
+ : CONTEXTUAL;
+ CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ ASSERT(var->IsContextSlot());
+ Register context = cp;
+ Register next = x10;
+ Register temp = x11;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+ }
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ Cbnz(temp, slow);
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+ __ B(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ }
+ }
+ __ B(done);
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in x2 and the global
+ // object (receiver) in x0.
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(var->name()));
+ CallLoadIC(CONTEXTUAL);
+ context()->Plug(x0);
+ break;
+ }
+
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, var->IsContextSlot()
+ ? "Context variable"
+ : "Stack variable");
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is
+ // physically located in the source after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
+ } else {
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST_LEGACY &&
+ var->initializer_position() < proxy->position();
+ }
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ __ Bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST_LEGACY);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ }
+ context()->Plug(x0);
+ break;
+ }
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case Variable::LOOKUP: {
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+ __ Bind(&slow);
+ Comment cmnt(masm_, "Lookup variable");
+ __ Mov(x1, Operand(var->name()));
+ __ Push(cp, x1); // Context and name.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Bind(&done);
+ context()->Plug(x0);
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // x5 = materialized value (RegExp literal)
+ // x4 = JS function, literals array
+ // x3 = literal index
+ // x2 = RegExp pattern
+ // x1 = RegExp flags
+ // x0 = RegExp literal clone
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset));
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in x0.
+ __ Mov(x3, Smi::FromInt(expr->literal_index()));
+ __ Mov(x2, Operand(expr->pattern()));
+ __ Mov(x1, Operand(expr->flags()));
+ __ Push(x4, x3, x2, x1);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+ __ Mov(x5, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x10, Smi::FromInt(size));
+ __ Push(x5, x10);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(x5);
+
+ __ Bind(&allocated);
+ // After this, registers are used as follows:
+ // x0: Newly allocated regexp.
+ // x5: Materialized regexp.
+ // x10, x11, x12: temps.
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(x10, Heap::kNullValueRootIndex);
+ __ Push(x10);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_properties));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Mov(x0, Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
+ const int max_cloned_properties =
+ FastCloneShallowObjectStub::kMaximumClonedProperties;
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
+ properties_count > max_cloned_properties) {
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in x0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ Push(x0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ Mov(x2, Operand(key->value()));
+ __ Peek(x1, 0);
+ CallStoreIC(key->LiteralFeedbackId());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ __ Mov(x0, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(x0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ VisitForEffect(key);
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ if (property->emit_store()) {
+ // Duplicate receiver on stack.
+ __ Peek(x0, 0);
+ __ Push(x0);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kSetPrototype, 2);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ Peek(x10, 0); // Duplicate receiver.
+ __ Push(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Smi::FromInt(NONE));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ Peek(x0, 0);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ expr->BuildConstantElements(isolate());
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ ASSERT_EQ(2, constant_elements->length());
+ ElementsKind constant_elements_kind =
+ static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(constant_elements));
+ if (has_fast_elements && constant_elements_values->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode,
+ length);
+ __ CallStub(&stub);
+ __ IncrementCounter(
+ isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
+ } else if ((expr->depth() > 1) || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Mov(x0, Smi::FromInt(flags));
+ __ Push(x3, x2, x1, x0);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+ FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ __ Push(x0);
+ __ Push(Smi::FromInt(expr->literal_index()));
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ if (IsFastObjectElementsKind(constant_elements_kind)) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ Peek(x6, kPointerSize); // Copy of array literal.
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
+ __ Str(result_register(), FieldMemOperand(x1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(x1, offset, result_register(), x10,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ Mov(x3, Smi::FromInt(i));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ __ Drop(1); // literal index
+ context()->PlugTOS();
+ } else {
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
+ Comment cmnt(masm_, "[ Assignment");
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ Push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ Push(x0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Mov(x2, Operand(key->value()));
+ // Call the load IC. It has arguments receiver and property name in x0 and x2.
+ CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ // Call the keyed load IC. It has arguments key and receiver in x0 and x1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, prop->PropertyFeedbackId());
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, both_smis, stub_call;
+
+ // Get the arguments.
+ Register left = x1;
+ Register right = x0;
+ Register result = x0;
+ __ Pop(left);
+
+ // Perform combined smi check on both operands.
+ __ Orr(x10, left, right);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(x10, &both_smis);
+
+ __ Bind(&stub_call);
+ BinaryOpICStub stub(op, mode);
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ B(&done);
+
+ __ Bind(&both_smis);
+ // Smi case. This code works in the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // BinaryOpStub::GenerateSmiSmiOperation for comments.
+ // TODO(all): That doesn't exist any more. Where are the comments?
+ //
+ // The set of operations that needs to be supported here is controlled by
+ // FullCodeGenerator::ShouldInlineSmiCase().
+ switch (op) {
+ case Token::SAR:
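+ // Extract the untagged 5-bit shift count (shift amount mod 32) from the
+ // tagged right operand.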
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Asr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsl(result, left, right);
+ break;
+ case Token::SHR: {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ __ Tbnz(left, kXSignBit, &stub_call);
+ __ Bind(&right_not_zero);
+ __ Ubfx(right, right, kSmiShift, 5);
+ __ Lsr(result, left, right);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ }
+ case Token::ADD:
+ __ Adds(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::SUB:
+ __ Subs(x10, left, right);
+ __ B(vs, &stub_call);
+ __ Mov(result, x10);
+ break;
+ case Token::MUL: {
+ Label not_minus_zero, done;
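+ // Each tagged smi operand is (value << 32), so the high 64 bits of the
+ // 128-bit product (computed by Smulh) give the untagged product.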
+ __ Smulh(x10, left, right);
+ __ Cbnz(x10, &not_minus_zero);
+ __ Eor(x11, left, right);
+ __ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Mov(result, x10);
+ __ B(&done);
+ __ Bind(&not_minus_zero);
+ __ Cls(x11, x10);
+ __ Cmp(x11, kXRegSizeInBits - kSmiShift);
+ __ B(lt, &stub_call);
+ __ SmiTag(result, x10);
+ __ Bind(&done);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Orr(result, left, right);
+ break;
+ case Token::BIT_AND:
+ __ And(result, left, right);
+ break;
+ case Token::BIT_XOR:
+ __ Eor(result, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ Pop(x1);
+ BinaryOpICStub stub(op, mode);
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ {
+ Assembler::BlockPoolsScope scope(masm_);
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+ ASSERT(expr->IsValidLeftHandSide());
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
+ // this copy.
+ __ Mov(x1, x0);
+ __ Pop(x0); // Restore value.
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ CallStoreIC();
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Push(x0); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Mov(x1, x0);
+ __ Pop(x2, x0);
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ Str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Mov(x10, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Mov(x11, Operand(name));
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ // jssp[0] : mode.
+ // jssp[8] : name.
+ // jssp[16] : context.
+ // jssp[24] : value.
+ __ Push(x0, cp, x11, x10);
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Token::Value op) {
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ Mov(x2, Operand(var->name()));
+ __ Ldr(x1, GlobalObjectMemOperand());
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ ASSERT(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Push(x0);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(cp, x0); // Context and name.
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ Bind(&skip);
+ }
+
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
+ // Non-initializing assignment to a let variable needs a write barrier.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label assign;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+ // Perform the assignment.
+ __ Bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ // Assignment to var or initializing assignment to let/const
+ // in harmony mode.
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && op == Token::INIT_LET) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ }
+ // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+
+ CallStoreIC(expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
+ // Assignment to a property, using a keyed store IC.
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // TODO(all): Could we pass this in registers rather than on the stack?
+ __ Pop(x1, x2); // Key and object holding the property.
+
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->AssignmentFeedbackId());
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(x0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Pop(x1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+ ASM_LOCATION("EmitCallWithIC");
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ CallFunctionFlags flags;
+ // Get the target function.
+ if (callee->IsVariableProxy()) {
+ { StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ Push(isolate()->factory()->undefined_value());
+ flags = NO_CALL_FUNCTION_FLAGS;
+ } else {
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x0, 0);
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+ flags = CALL_AS_METHOD;
+ }
+
+ // Load the arguments.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, flags);
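+  // The target function was pushed below the receiver and the arg_count
+  // arguments, so it sits (arg_count + 1) slots down the stack;
+  // CallFunctionStub expects it in x1.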
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ // Load the function from the receiver.
+ ASSERT(callee->IsProperty());
+ __ Peek(x1, 0);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, CALL_AS_METHOD);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
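+  // The feedback slot is seeded with the uninitialized sentinel here; the
+  // stub updates it at run time based on the call targets it observes.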
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
+
+ // Record call targets in unoptimized code.
+ CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
+ // Prepare to push a copy of the first argument or undefined if it doesn't
+ // exist.
+ if (arg_count > 0) {
+ __ Peek(x10, arg_count * kXRegSize);
+ } else {
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ }
+
+ // Prepare to push the receiver of the enclosing function.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Prepare to push the language mode.
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+  // Prepare to push the start position of the scope the call resides in.
+ __ Mov(x11, Smi::FromInt(scope()->start_position()));
+
+ // Push.
+ __ Push(x10, x11);
+
+ // Do the runtime call.
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ if (call_type == Call::POSSIBLY_EVAL_CALL) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // The runtime call returns a pair of values in x0 (function) and
+ // x1 (receiver). Touch up the stack with the right values.
+ __ PokePair(x1, x0, arg_count * kPointerSize);
+ }
+
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+
+ // Call the evaluated function.
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
+
+ } else if (call_type == Call::GLOBAL_CALL) {
+ EmitCallWithIC(expr);
+
+ } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+ // Call to a lookup slot (dynamically introduced variable).
+ VariableProxy* proxy = callee->AsVariableProxy();
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+ }
+
+ __ Bind(&slow);
+ // Call the runtime to find the function to call (returned in x0)
+ // and the object holding it (returned in x1).
+ __ Push(context_register());
+ __ Mov(x10, Operand(proxy->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(x0, x1); // Receiver, function.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ B(&call);
+ __ Bind(&done);
+ // Push function.
+ __ Push(x0);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ __ Bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot.
+ EmitCallWithStub(expr);
+ } else if (call_type == Call::PROPERTY_CALL) {
+ Property* property = callee->AsProperty();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(property->obj());
+ }
+ if (property->key()->IsPropertyName()) {
+ EmitCallWithIC(expr);
+ } else {
+ EmitKeyedCallWithIC(expr, property->key());
+ }
+
+ } else {
+ ASSERT(call_type == Call::OTHER_CALL);
+ // Call to an arbitrary expression not handled specially above.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(callee);
+ }
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
+ __ Push(x1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into x1 and x0.
+ __ Mov(x0, arg_count);
+ __ Peek(x1, arg_count * kXRegSize);
+
+ // Record call targets in unoptimized code.
+ Handle<Object> uninitialized =
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadObject(x2, FeedbackVector());
+ __ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(RECORD_CALL_TARGET);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
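+  // The mask below covers both the smi tag bit and the sign bit of the smi
+  // payload, so the true branch is taken only when both are clear, i.e. for
+  // non-negative smis.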
+ __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+ if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tbnz(x11, Map::kIsUndetectable, if_false);
+ __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+ __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(lt, if_false);
+ __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(le, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ Tst(x11, 1 << Map::kIsUndetectable);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ne, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Register object = x0;
+ __ AssertNotSmi(object);
+
+ Register map = x10;
+ Register bitfield2 = x11;
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
+
+ // Check for fast case object. Generate false result for slow case object.
+ Register props = x12;
+ Register props_map = x12;
+ Register hash_table_map = x13;
+ __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
+ __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
+ __ Cmp(props_map, hash_table_map);
+ __ B(eq, if_false);
+
+  // Look for the valueOf name in the descriptor array, and indicate false if
+  // found. Since we omit an enumeration index check, a valueOf property added
+  // via a transition that shares this descriptor array yields a false
+  // positive here.
+ Label loop, done;
+
+ // Skip loop if no descriptors are valid.
+ Register descriptors = x12;
+ Register descriptors_length = x13;
+ __ NumberOfOwnDescriptors(descriptors_length, map);
+ __ Cbz(descriptors_length, &done);
+
+ __ LoadInstanceDescriptors(map, descriptors);
+
+ // Calculate the end of the descriptor array.
+ Register descriptors_end = x14;
+ __ Mov(x15, DescriptorArray::kDescriptorSize);
+ __ Mul(descriptors_length, descriptors_length, x15);
+ // Calculate location of the first key name.
+ __ Add(descriptors, descriptors,
+ DescriptorArray::kFirstOffset - kHeapObjectTag);
+ // Calculate the end of the descriptor array.
+ __ Add(descriptors_end, descriptors,
+ Operand(descriptors_length, LSL, kPointerSizeLog2));
+
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // string "valueOf" the result is false.
+ Register valueof_string = x1;
+ int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
+ __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
+ __ Bind(&loop);
+ __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
+ __ Cmp(x15, valueof_string);
+ __ B(eq, if_false);
+ __ Cmp(descriptors, descriptors_end);
+ __ B(ne, &loop);
+
+ __ Bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+ __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
+
+ __ Bind(&skip_lookup);
+
+ // If a valueOf property is not found on the object check that its prototype
+ // is the unmodified String prototype. If not result is false.
+ Register prototype = x1;
+ Register global_idx = x2;
+ Register native_context = x2;
+ Register string_proto = x3;
+ Register proto_map = x4;
+ __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ JumpIfSmi(prototype, if_false);
+ __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
+ __ Ldr(global_idx, GlobalObjectMemOperand());
+ __ Ldr(native_context,
+ FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
+ __ Ldr(string_proto,
+ ContextMemOperand(native_context,
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Cmp(proto_map, string_proto);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Only a HeapNumber can be -0.0, so return false if we have something else.
+ __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+
+ // Test the bit pattern.
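+  // Only -0.0 has the bit pattern 0x8000000000000000 (INT64_MIN), and that is
+  // the only value for which subtracting 1 sets the overflow (V) flag.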
+ __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
+ __ Cmp(x10, 1); // Set V on 0x8000000000000000.
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(vs, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Pop(x1);
+ __ Cmp(x0, x1);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in x1.
+ VisitForAccumulatorValue(args->at(0));
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ Label exit;
+ // Get the number of formal parameters.
+ __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
+ __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &exit);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ Bind(&exit);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitClassOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(x0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ // x10: object's map.
+ // x11: object's type.
+ __ B(lt, &null);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ B(eq, &function);
+
+ __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ B(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
+ __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
+ __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
+ &non_function_constructor);
+
+ // x12 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x0,
+ FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ B(&done);
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+ __ B(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ __ LoadRoot(x0, Heap::kObject_stringRootIndex);
+ __ B(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ __ LoadRoot(x0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 3);
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallRuntime(Runtime::kHiddenLog, 2);
+ }
+
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitValueOf");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(x0, &done);
+ // If the object is not a value type, return the object.
+ __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
+ __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done, not_date_object;
+ Register object = x0;
+ Register result = x0;
+ Register stamp_addr = x10;
+ Register stamp_cache = x11;
+
+ __ JumpIfSmi(object, &not_date_object);
+ __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, &not_date_object);
+
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ B(&done);
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
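+      // Cached date fields are only valid while the object's cache stamp
+      // matches the isolate's current date cache stamp; otherwise fall
+      // through to the runtime helper below.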
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(x10, stamp);
+ __ Ldr(stamp_addr, MemOperand(x10));
+ __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(stamp_addr, stamp_cache);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, index);
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ B(&done);
+ }
+
+ __ Bind(&not_date_object);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ one_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strb(value, MemOperand(scratch, index));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ Register string = x0;
+ Register index = x1;
+ Register value = x2;
+ Register scratch = x10;
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(value, index);
+
+ if (FLAG_debug_code) {
+ __ AssertSmi(value, kNonSmiValue);
+ __ AssertSmi(index, kNonSmiIndex);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
+ two_byte_seq_type);
+ }
+
+ __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(value);
+ __ SmiUntag(index);
+ __ Strh(value, MemOperand(scratch, index, LSL, 1));
+ context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the MathPow stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ Pop(x1);
+ // x0 = value.
+ // x1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(x1, &done);
+
+ // If the object is not a value type, return the value.
+ __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
+
+ // Store the value.
+ __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ Mov(x10, x0);
+ __ RecordWriteField(
+ x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument into x0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ Register code = x0;
+ Register result = x1;
+
+ StringCharFromCodeGenerator generator(code, result);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x3;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = x1;
+ Register index = x0;
+ Register result = x0;
+
+ __ Pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ x3,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ B(&done);
+
+ __ Bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger conversion.
+ __ Mov(result, Smi::FromInt(0));
+ __ B(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ Pop(x1);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_log, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+ // Load the argument on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ Label runtime, done;
+ // Check for non-function argument (including proxy).
+ __ JumpIfSmi(x0, &runtime);
+ __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
+
+ // InvokeFunction requires the function in x1. Move it in there.
+ __ Mov(x1, x0);
+ ParameterCount count(arg_count);
+ __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ B(&done);
+
+ __ Bind(&runtime);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ Bind(&done);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ RegExpConstructResultStub stub;
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(2));
+ __ Pop(x1, x2);
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(2, args->length());
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->native_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort(kAttemptToUseUndefinedCache);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(x0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = x0;
+ Register cache = x1;
+ __ Ldr(cache, GlobalObjectMemOperand());
+ __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ Ldr(cache, ContextMemOperand(cache,
+ Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ Ldr(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+ Label done;
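+  // Only the (key, value) pair that the cache finger points at is probed
+  // inline; on a miss the runtime performs the full lookup below.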
+ __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
+ JSFunctionResultCache::kFingerOffset));
+ __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
+
+ // Load the key and data from the cache.
+ __ Ldp(x2, x3, MemOperand(x3));
+
+ __ Cmp(key, x2);
+ __ CmovX(x0, x3, eq);
+ __ B(eq, &done);
+
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ Tst(x10, String::kContainsCachedArrayIndexMask);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(x0);
+
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
+ __ IndexFromHash(x10, x0);
+
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ Register array = x0;
+ Register result = x0;
+ Register elements = x1;
+ Register element = x2;
+ Register separator = x3;
+ Register array_length = x4;
+ Register result_pos = x5;
+ Register map = x6;
+ Register string_length = x10;
+ Register elements_end = x11;
+ Register string = x12;
+ Register scratch1 = x13;
+ Register scratch2 = x14;
+ Register scratch3 = x7;
+ Register separator_length = x15;
+
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ // The separator operand is on the stack.
+ __ Pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(map, scratch1, &bailout);
+
+ // If the array has length zero, return the empty string.
+ // Load and untag the length of the array.
+ // It is an unsigned value, so we can skip sign extension.
+ // We assume little endianness.
+ __ Ldrsw(array_length,
+ UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
+ __ Cbnz(array_length, &non_trivial_array);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ B(&done);
+
+ __ Bind(&non_trivial_array);
+ // Get the FixedArray containing array's elements.
+ __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths.
+ __ Mov(string_length, 0);
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (not smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Cmp(array_length, 0);
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ }
+ __ Bind(&loop);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ JumpIfSmi(string, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ Adds(string_length, string_length, scratch1);
+ __ B(vs, &bailout);
+ __ Cmp(element, elements_end);
+ __ B(lt, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ Cmp(array_length, 1);
+ __ B(ne, &not_size_one_array);
+ __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ B(&done);
+
+ __ Bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array (not smi).
+ // string_length: Sum of string lengths (not smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string.
+ // Load the separator length as untagged.
+ // We assume little endianness, and that the length is positive.
+ __ Ldrsw(separator_length,
+ UntagSmiFieldMemOperand(separator,
+ SeqOneByteString::kLengthOffset));
+ __ Sub(string_length, string_length, separator_length);
+ __ Umaddl(string_length, array_length.W(), separator_length.W(),
+ string_length);
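+  // That is, string_length += (array_length - 1) * separator_length, computed
+  // as (string_length - separator_length) + array_length * separator_length
+  // so that a single multiply-accumulate instruction can be used.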
+
+ // Get first element in the array.
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array (not smi).
+ __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
+ &bailout);
+
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ // TODO(all): useless unless AllocateAsciiString trashes the register.
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Check the length of the separator.
+ __ Cmp(separator_length, 1);
+ __ B(eq, &one_char_separator);
+ __ B(gt, &long_separator);
+
+ // Empty separator case
+ __ Bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &empty_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // One-character separator case
+ __ Bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+ __ B(&one_char_separator_loop_entry);
+
+ __ Bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ASCII char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
+
+ // Copy next array element to the result.
+ __ Bind(&one_char_separator_loop_entry);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ Bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ // TODO(all): hoist next two instructions.
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(separator, String::kLengthOffset));
+ __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+
+ __ Bind(&long_separator);
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ Ldrsw(string_length,
+ UntagSmiFieldMemOperand(string, String::kLengthOffset));
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(result_pos, string, string_length, scratch1);
+ __ Cmp(element, elements_end);
+ __ B(lt, &long_separator_loop); // End while (element < elements_end).
+ __ B(&done);
+
+ __ Bind(&bailout);
+ // Returning undefined will force slower code to handle it.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRunTime");
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ if (expr->is_jsruntime()) {
+ // Push the builtins object as the receiver.
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+ __ Push(x0);
+
+ // Load the function from the receiver.
+ Handle<String> name = expr->name();
+ __ Mov(x2, Operand(name));
+ CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+
+ // Push the target function under the receiver.
+ __ Pop(x10);
+ __ Push(x0, x10);
+
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Record source position of the IC call.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+ __ Peek(x1, (arg_count + 1) * kPointerSize);
+ __ CallStub(&stub);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ context()->DropAndPlug(1, x0);
+ } else {
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(x0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ Mov(x10, Smi::FromInt(strict_mode()));
+ __ Push(x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is allowed.
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
+ if (var->IsUnallocated()) {
+ __ Ldr(x12, GlobalObjectMemOperand());
+ __ Mov(x11, Operand(var->name()));
+ __ Mov(x10, Smi::FromInt(SLOPPY));
+ __ Push(x12, x11, x10);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(x0);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(var->is_this());
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Mov(x2, Operand(var->name()));
+ __ Push(context_register(), x2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+ context()->Plug(x0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+      break;
+ }
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(),
+ test->false_label(),
+ test->true_label(),
+ test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+ // TODO(jbramley): This could be much more efficient using (for
+ // example) the CSEL instruction.
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(),
+ &materialize_false,
+ &materialize_true,
+ &materialize_true);
+
+ __ Bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ B(&done);
+
+ __ Bind(&done);
+ if (context()->IsStackValue()) {
+ __ Push(result_register());
+ }
+ }
+ break;
+ }
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(x0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ Push(xzr);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ Push(x0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ // KEYED_PROPERTY
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Peek(x1, 0);
+ __ Push(x0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(x0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property we
+ // store the result under the receiver that is currently on top of the
+ // stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kPointerSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, kPointerSize * 2);
+ break;
+ }
+ }
+ }
+
+ __ Adds(x0, x0, Smi::FromInt(count_value));
+ __ B(vc, &done);
+ // Call stub. Undo operation first.
+ __ Sub(x0, x0, Smi::FromInt(count_value));
+ __ B(&stub_call);
+ __ Bind(&slow);
+ }
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ Push(x0);
+ break;
+ case NAMED_PROPERTY:
+ __ Poke(x0, kXRegSize);
+ break;
+ case KEYED_PROPERTY:
+ __ Poke(x0, 2 * kXRegSize);
+ break;
+ }
+ }
+ }
+
+ __ Bind(&stub_call);
+ __ Mov(x1, x0);
+ __ Mov(x0, Smi::FromInt(count_value));
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ {
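+    // Block constant and veneer pool emission so the patchable IC call
+    // sequence recorded by the patch site stays contiguous.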
+ Assembler::BlockPoolsScope scope(masm_);
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ }
+ __ Bind(&done);
+
+ // Store the value returned in x0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(x0);
+ }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(x0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+ __ Pop(x1);
+ CallStoreIC(expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ Pop(x1); // Key.
+ __ Pop(x2); // Receiver.
+ Handle<Code> ic = strict_mode() == SLOPPY
+ ? isolate()->builtins()->KeyedStoreIC_Initialize()
+ : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+ CallIC(ic, expr->CountStoreFeedbackId());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(x0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ ASSERT(!context()->IsEffect());
+ ASSERT(!context()->IsTest());
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Mov(x2, Operand(proxy->name()));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ CallLoadIC(NOT_CONTEXTUAL);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(x0);
+ } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+
+ __ Bind(&slow);
+ __ Mov(x0, Operand(proxy->name()));
+ __ Push(cp, x0);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ Bind(&done);
+
+ context()->Plug(x0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInDuplicateContext(expr);
+ }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof");
+ Comment cmnt(masm_, "[ EmitLiteralCompareTypeof");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (check->Equals(isolate()->heap()->number_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
+ __ JumpIfSmi(x0, if_true);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->string_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => false.
+ __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->symbol_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
+ __ JumpIfSmi(x0, if_false);
+ __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
+ __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
+ __ CompareRoot(x0, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (FLAG_harmony_typeof &&
+ check->Equals(isolate()->heap()->null_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
+ __ CompareRoot(x0, Heap::kNullValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
+ ASM_LOCATION(
+ "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+ __ JumpIfSmi(x0, if_false);
+ // Check for undetectable objects => true.
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
+ fall_through);
+ } else if (check->Equals(isolate()->heap()->function_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
+ __ JumpIfSmi(x0, if_false);
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
+ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
+ fall_through);
+
+ } else if (check->Equals(isolate()->heap()->object_string())) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
+ __ JumpIfSmi(x0, if_false);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
+ }
+ // Check for JS objects => true.
+ Register map = x10;
+ __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ if_false, lt);
+ __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, if_false);
+ // Check for undetectable objects => false.
+ __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
+
+ __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
+ fall_through);
+
+ } else {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
+ if (if_false != fall_through) __ B(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Try to generate an optimized comparison with a literal value.
+ // TODO(jbramley): This only checks common values like NaN or undefined.
+ // Should it also handle ARM64 immediate operands?
+ if (TryLiteralCompare(expr)) {
+ return;
+ }
+
+ // Assign labels according to context()->PrepareTest.
+ Label materialize_true;
+ Label materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ // The stub returns 0 for true.
+ __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+
+ // Pop the stack value.
+ __ Pop(x1);
+
+ JumpPatchSite patch_site(masm_);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
+ __ Cmp(x1, x0);
+ Split(cond, if_true, if_false, NULL);
+ __ Bind(&slow_case);
+ }
+
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(x0, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ }
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ // TODO(jbramley): Tidy this up once the merge is done, using named registers
+  // and suchlike. The implementation changes a little on bleeding_edge, so I
+  // don't want to spend too much time on it now.
+
+ switch (expr->yield_kind()) {
+ case Yield::SUSPEND:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ __ Push(result_register());
+ // Fall through.
+ case Yield::INITIAL: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ B(&suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&continuation);
+ __ B(&resume);
+
+ __ Bind(&suspend);
+ VisitForAccumulatorValue(expr->generator_object());
+ ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+ __ Mov(x1, Smi::FromInt(continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
+ __ Cmp(__ StackPointer(), x1);
+ __ B(eq, &post_runtime);
+ __ Push(x0); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&post_runtime);
+ __ Pop(result_register());
+ EmitReturnSequence();
+
+ __ Bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Yield::FINAL: {
+ VisitForAccumulatorValue(expr->generator_object());
+ __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ Str(x1, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset));
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindBeforeReturn();
+ EmitReturnSequence();
+ break;
+ }
+
+ case Yield::DELEGATING: {
+ VisitForStackValue(expr->generator_object());
+
+ // Initial stack layout is as follows:
+ // [sp + 1 * kPointerSize] iter
+ // [sp + 0 * kPointerSize] g
+
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+ Label l_next, l_call, l_loop;
+ // Initial send value is undefined.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(&l_next);
+
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+ __ Bind(&l_catch);
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+ __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "throw", iter, except
+ __ B(&l_call);
+
+ // try { received = %yield result }
+ // Shuffle the received result above a try handler and yield it without
+ // re-boxing.
+ __ Bind(&l_try);
+ __ Pop(x0); // result
+ __ PushTryHandler(StackHandler::CATCH, expr->index());
+ const int handler_size = StackHandlerConstants::kSize;
+ __ Push(x0); // result
+ __ B(&l_suspend);
+
+ // TODO(jbramley): This label is bound here because the following code
+ // looks at its pos(). Is it possible to do something more efficient here,
+ // perhaps using Adr?
+ __ Bind(&l_continuation);
+ __ B(&l_resume);
+
+ __ Bind(&l_suspend);
+ const int generator_object_depth = kPointerSize + handler_size;
+ __ Peek(x0, generator_object_depth);
+ __ Push(x0); // g
+ ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+ __ Mov(x1, Smi::FromInt(l_continuation.pos()));
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
+ __ Mov(x1, cp);
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Pop(x0); // result
+ EmitReturnSequence();
+ __ Bind(&l_resume); // received in x0
+ __ PopTryHandler();
+
+ // receiver = iter; f = 'next'; arg = received;
+ __ Bind(&l_next);
+ __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next"
+ __ Peek(x3, 1 * kPointerSize); // iter
+ __ Push(x2, x3, x0); // "next", iter, received
+
+ // result = receiver[f](arg);
+ __ Bind(&l_call);
+ __ Peek(x1, 1 * kPointerSize);
+ __ Peek(x0, 2 * kPointerSize);
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallIC(ic, TypeFeedbackId::None());
+ __ Mov(x1, x0);
+ __ Poke(x1, 2 * kPointerSize);
+ CallFunctionStub stub(1, CALL_AS_METHOD);
+ __ CallStub(&stub);
+
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Drop(1); // The function is still on the stack; drop it.
+
+ // if (!result.done) goto l_try;
+ __ Bind(&l_loop);
+ __ Push(x0); // save result
+ __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done"
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in x0
+ // The ToBooleanStub argument (result.done) is in x0.
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+ CallIC(bool_ic);
+ __ Cbz(x0, &l_try);
+
+ // result.value
+ __ Pop(x0); // result
+ __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value"
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in x0
+ context()->DropAndPlug(2, x0); // drop iter and g
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
+ Expression *value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
+ Register value_reg = x0;
+ Register generator_object = x1;
+ Register the_hole = x2;
+ Register operand_stack_size = w3;
+ Register function = x4;
+
+ // The value stays in x0, and is ultimately read by the resumed generator, as
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // x1 will hold the generator object until the activation has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ __ Pop(generator_object);
+
+ // Check generator state.
+ Label wrong_state, closed_state, done;
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+ __ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
+ __ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
+
+ // Load suspended function and context.
+ __ Ldr(cp, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContextOffset));
+ __ Ldr(function, FieldMemOperand(generator_object,
+ JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kReceiverOffset));
+ __ Push(x10);
+
+ // Push holes for the rest of the arguments to the generator function.
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it. However, in this case, we operate on
+ // 32-bit W registers, so extension isn't required.
+ __ Ldr(w10, FieldMemOperand(x10,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ PushMultipleTimes(the_hole, w10);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame;
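+  // Bl branches to resume_frame and sets lr to the following B(&done);
+  // resume_frame pushes that lr as the return address, so the resumed
+  // generator eventually returns through the branch to done.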
+ __ Bl(&resume_frame);
+ __ B(&done);
+
+ __ Bind(&resume_frame);
+ __ Push(lr, // Return address.
+ fp, // Caller's frame pointer.
+ cp, // Callee's context.
+ function); // Callee's JS Function.
+ __ Add(fp, __ StackPointer(), kPointerSize * 2);
+
+ // Load and untag the operand stack size.
+ __ Ldr(x10, FieldMemOperand(generator_object,
+ JSGeneratorObject::kOperandStackOffset));
+ __ Ldr(operand_stack_size,
+ UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ Cbnz(operand_stack_size, &slow_resume);
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Ldrsw(x11,
+ UntagSmiFieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Add(x10, x10, x11);
+ __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ Str(x12, FieldMemOperand(generator_object,
+ JSGeneratorObject::kContinuationOffset));
+ __ Br(x10);
+
+ __ Bind(&slow_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ __ PushMultipleTimes(the_hole, operand_stack_size);
+
+ __ Mov(x10, Smi::FromInt(resume_mode));
+ __ Push(generator_object, result_register(), x10);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+ // Not reached: the runtime call returns elsewhere.
+ __ Unreachable();
+
+ // Reach here when generator is closed.
+ __ Bind(&closed_state);
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ // Return completed iterator result when generator is closed.
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ __ Push(x10);
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(true);
+ } else {
+ // Throw the provided value.
+ __ Push(value_reg);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
+ }
+ __ B(&done);
+
+ // Throw error if we attempt to operate on a running generator.
+ __ Bind(&wrong_state);
+ __ Push(generator_object);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+
+ __ Bind(&done);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ // Allocate and populate an object with this form: { value: VAL, done: DONE }
+
+ Register result = x0;
+ __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ __ Bind(&allocated);
+ Register map_reg = x1;
+ Register result_value = x2;
+ Register boolean_done = x3;
+ Register empty_fixed_array = x4;
+ __ Mov(map_reg, Operand(map));
+ __ Pop(result_value);
+ __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
+ __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ // TODO(jbramley): Use Stp if possible.
+ __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
+ __ Str(empty_fixed_array,
+ FieldMemOperand(result, JSObject::kPropertiesOffset));
+ __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ Str(result_value,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultValuePropertyOffset));
+ __ Str(boolean_done,
+ FieldMemOperand(result,
+ JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
+ x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+// TODO(all): I don't like this method.
+// It seems to me that in too many places x0 is used in place of this.
+// Also, this function is not suitable for all places where x0 should be
+// abstracted (e.g. when used as an argument). But some places assume that the
+// first argument register is x0, and use this function instead.
+// Considering that most of the register allocation is hard-coded in the
+// FullCodeGen, that it is unlikely we will need to change it extensively, and
+// that abstracting the allocation through functions would not yield any
+// performance benefit, I think the existence of this function is debatable.
+Register FullCodeGenerator::result_register() {
+ return x0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+ __ Str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ Ldr(dst, ContextMemOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ ASSERT(kSmiTag == 0);
+ __ Push(xzr);
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ Push(x10);
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(x10);
+ }
+}
+
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
+ ASSERT(!result_register().is(x10));
+ // Preserve the result register while executing finally block.
+ // Also cook the return address in lr to the stack (smi encoded Code* delta).
+ __ Sub(x10, lr, Operand(masm_->CodeObject()));
+ __ SmiTag(x10);
+ __ Push(result_register(), x10);
+
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x10, pending_message_obj);
+ __ Ldr(x10, MemOperand(x10));
+
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x11, has_pending_message);
+ __ Ldr(x11, MemOperand(x11));
+ __ SmiTag(x11);
+
+ __ Push(x10, x11);
+
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x10, pending_message_script);
+ __ Ldr(x10, MemOperand(x10));
+ __ Push(x10);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
+ ASSERT(!result_register().is(x10));
+
+ // Restore pending message from stack.
+ __ Pop(x10, x11, x12);
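+  // x10 holds the pending message script, x11 the (smi) has_pending_message
+  // flag and x12 the pending message object, matching the push order in
+  // EnterFinallyBlock.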
+ ExternalReference pending_message_script =
+ ExternalReference::address_of_pending_message_script(isolate());
+ __ Mov(x13, pending_message_script);
+ __ Str(x10, MemOperand(x13));
+
+ __ SmiUntag(x11);
+ ExternalReference has_pending_message =
+ ExternalReference::address_of_has_pending_message(isolate());
+ __ Mov(x13, has_pending_message);
+ __ Str(x11, MemOperand(x13));
+
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ Mov(x13, pending_message_obj);
+ __ Str(x12, MemOperand(x13));
+
+ // Restore result register and cooked return address from the stack.
+ __ Pop(x10, result_register());
+
+ // Uncook the return address (see EnterFinallyBlock).
+ __ SmiUntag(x10);
+ __ Add(x11, x10, Operand(masm_->CodeObject()));
+ __ Br(x11);
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ // Turn the jump into a nop.
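+  // The back edge sequence is: <b.pl or nop>, <ldr x16, entry>, <blr x16>.
+  // 'pc' points just past the blr, so the instruction to patch is three
+  // instructions back.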
+ Address branch_address = pc - 3 * kInstructionSize;
+ PatchingAssembler patcher(branch_address, 1);
+
+ ASSERT(Instruction::Cast(branch_address)
+ ->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
+ (Instruction::Cast(branch_address)->IsCondBranchImm() &&
+ Instruction::Cast(branch_address)->ImmPCOffset() ==
+ 6 * kInstructionSize));
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // .. .. .. .. b.pl ok
+ // .. .. .. .. ldr x16, pc+<interrupt stub address>
+ // .. .. .. .. blr x16
+ // ... more instructions.
+ // ok-label
+ // Jump offset is 6 instructions.
+ patcher.b(6, pl);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // .. .. .. .. mov x0, x0 (NOP)
+ // .. .. .. .. ldr x16, pc+<on-stack replacement address>
+ // .. .. .. .. blr x16
+ patcher.nop(Assembler::INTERRUPT_CODE_NOP);
+ break;
+ }
+
+ // Replace the call address.
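+  // The ldr two instructions back is a pc-relative literal load; its
+  // ImmPCOffset() gives the offset to the 64-bit slot holding the builtin
+  // entry address, which is rewritten below.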
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ Address interrupt_address_pointer =
+ reinterpret_cast<Address>(load) + load->ImmPCOffset();
+ ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->InterruptCheck()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OsrAfterStackCheck()
+ ->entry())) ||
+ (Memory::uint64_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
+ ->builtins()
+ ->OnStackReplacement()
+ ->entry())));
+ Memory::uint64_at(interrupt_address_pointer) =
+ reinterpret_cast<uint64_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, reinterpret_cast<Address>(load), replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ // TODO(jbramley): There should be some extra assertions here (as in the ARM
+ // back-end), but this function is gone in bleeding_edge so it might not
+ // matter anyway.
+ Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3);
+
+ if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) {
+ Instruction* load = Instruction::Cast(pc)->preceding(2);
+ uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) +
+ load->ImmPCOffset());
+ if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ } else if (entry == reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) {
+ return OSR_AFTER_STACK_CHECK;
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ return INTERRUPT;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth,
+ int* context_length) {
+ ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
+ // The macros used here must preserve the result register.
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ __ Drop(*stack_depth); // Down to the handler block.
+ if (*context_length > 0) {
+ // Restore the context to its dedicated register and the stack.
+ __ Peek(cp, StackHandlerConstants::kContextOffset);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ PopTryHandler();
+ __ Bl(finally_entry_);
+
+ *stack_depth = 0;
+ *context_length = 0;
+ return previous_;
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/ic-arm64.cc b/deps/v8/src/arm64/ic-arm64.cc
new file mode 100644
index 000000000..5fb7d633f
--- /dev/null
+++ b/deps/v8/src/arm64/ic-arm64.cc
@@ -0,0 +1,1407 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/assembler-arm64.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "disasm.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// Generated code branches to "global_object" if the type is any kind of
+// global JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
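+  // Each Ccmp compares only if the previous check was 'ne'; otherwise it
+  // forces the Z flag, so the final B(eq) is taken when the type matches any
+  // of the three global object types.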
+ __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+ __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+ __ B(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+//
+// "receiver" holds the receiver on entry and is unchanged.
+// "elements" holds the property dictionary on fall through.
+static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+  // Let t be the object instance type; we want:
+ // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
+ // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
+ // check the lower bound.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
+ miss, lt);
+
+ // scratch0 now contains the map of the receiver and scratch1 the object type.
+ Register map = scratch0;
+ Register type = scratch1;
+
+ // Check if the receiver is a global JS object.
+ GenerateGlobalInstanceTypeCheck(masm, type, miss);
+
+ // Check that the object does not require access checks.
+ __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
+ __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
+
+ // Check that the properties dictionary is valid.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+ ASSERT(!AreAliased(result, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal property.
+ __ Bind(&done);
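+  // A NameDictionary entry is three consecutive words: key, value and
+  // details, hence the one- and two-word offsets used below.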
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ B(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ Ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value: The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ static const int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, kTypeAndReadOnlyMask);
+ __ B(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ static const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+ __ Str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ Mov(scratch1, value);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map_scratch,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ ASSERT(!AreAliased(map_scratch, scratch));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch, interceptor_bit, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object, we enter the
+  // runtime system to make sure that indexing into string objects works
+ // as intended.
+ STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, JS_OBJECT_TYPE);
+ __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+// taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+//          Allowed to be the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register elements_map,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* slow) {
+ ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+ // Check for fast array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+ not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // The elements_map register is only used for the not_fast_array path, which
+ // was handled above. From this point onward it is a scratch register.
+ Register scratch1 = elements_map;
+
+ // Check that the key (index) is within bounds.
+ __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch1);
+ __ B(hs, slow);
+
+ // Fast case: Do the load.
+ __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+ // Move the value to the result register.
+ // 'result' can alias with 'receiver' or 'key' but these two must be
+ // preserved if we jump to 'slow'.
+ __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is done the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm,
+ Register key,
+ Register map_scratch,
+ Register hash_scratch,
+ Label* index_string,
+ Label* not_unique) {
+ ASSERT(!AreAliased(key, map_scratch, hash_scratch));
+
+ // Is the key a name?
+ Label unique;
+ __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+ not_unique, hi);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ B(eq, &unique);
+
+ // Is the string an array index with cached numeric value?
+ __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ TestAndBranchIfAllClear(hash_scratch,
+ Name::kContainsCachedArrayIndexMask,
+ index_string);
+
+ // Is the string internalized? We know it's a string, so a single bit test is
+ // enough.
+ __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+ __ Bind(&unique);
+ // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' is modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register map,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
+
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
+ slow_case, lt);
+
+ // Check that the key is a positive smi.
+ __ JumpIfNotSmi(key, slow_case);
+ __ Tbnz(key, kXSignBit, slow_case);
+
+ // Load the elements object and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup.
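+  // The first two elements of the parameter map are the context and the
+  // arguments backing store, so the number of mapped parameters is the
+  // length minus two.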
+ __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Sub(scratch1, scratch1, Smi::FromInt(2));
+ __ Cmp(key, scratch1);
+ __ B(hs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ static const int offset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ Add(scratch1, map, offset);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+ // Load value from context and return it.
+ __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch1);
+ __ Lsl(scratch1, scratch1, kPointerSizeLog2);
+ __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
+ // The base of the result (scratch2) is passed to RecordWrite in
+ // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
+ return MemOperand(scratch2, scratch1);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ ASSERT(!AreAliased(key, parameter_map, scratch));
+
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(
+ backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch);
+ __ B(hs, slow_case);
+
+ __ Add(backing_store,
+ backing_store,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch, key);
+ return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x0, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
+
+ // x1 now holds the property dictionary.
+ GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+ ASM_LOCATION("LoadIC::GenerateMiss");
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+ // Perform tail call to the entry.
+ __ Push(x0, x2);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x2 : name
+ // -- lr : return address
+ // -- x0 : receiver
+ // -----------------------------------
+
+ __ Push(x0, x2);
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+ Label miss, unmapped;
+
+ Register map_scratch = x2;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+ __ Ldr(result, mapped_location);
+ __ Ret();
+
+ __ Bind(&unmapped);
+  // The parameter map is left in map_scratch when the jump to 'unmapped' is
+  // taken.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+ __ Ldr(x2, unmapped_location);
+ __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
+  // Move the result into x0. x0 must be preserved on miss.
+ __ Mov(result, x2);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ Label slow, notin;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register map = x3;
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register mapped1 = x4;
+ Register mapped2 = x5;
+
+ MemOperand mapped =
+ GenerateMappedArgumentsLookup(masm, receiver, key, map,
+ mapped1, mapped2,
+ &notin, &slow);
+ Operand mapped_offset = mapped.OffsetAsOperand();
+ __ Str(value, mapped);
+ __ Add(x10, mapped.base(), mapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+
+ __ Bind(&notin);
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped2 = x4;
+ MemOperand unmapped =
+ GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+ Operand unmapped_offset = unmapped.OffsetAsOperand();
+ __ Str(value, unmapped);
+ __ Add(x10, unmapped.base(), unmapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(unmapped.base(), x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+ __ Push(x1, x0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Register key = x0;
+ Register receiver = x1;
+
+ __ Push(receiver, key);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label check_number_dictionary;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
+ __ Ret();
+
+ __ Bind(&check_number_dictionary);
+ __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+ // Check whether we have a number dictionary.
+ __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+ __ LoadFromNumberDictionary(
+ slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
+ __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
+ Register key,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label *slow) {
+ ASSERT(!AreAliased(
+ key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label probe_dictionary, property_array_property;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup cache.
+ // Otherwise probe the dictionary.
+ __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+ // We keep the map of the receiver in scratch1.
+ Register receiver_map = scratch1;
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+ __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(scratch2, scratch2, mask);
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ Mov(scratch3, cache_keys);
+ __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+    // Load the map and advance scratch3 to the next entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, &try_next_entry);
+ __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
+ __ Cmp(key, scratch4);
+ __ B(eq, &hit_on_nth_entry[i]);
+ __ Bind(&try_next_entry);
+ }
+
+ // Last entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, slow);
+ __ Ldr(scratch4, MemOperand(scratch3));
+ __ Cmp(key, scratch4);
+ __ B(ne, slow);
+
+ // Get field offset.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ Bind(&hit_on_nth_entry[i]);
+ __ Mov(scratch3, cache_field_offsets);
+ if (i != 0) {
+ __ Add(scratch2, scratch2, i);
+ }
+ __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+ __ Ldrb(scratch5,
+ FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+ __ Subs(scratch4, scratch4, scratch5);
+ __ B(ge, &property_array_property);
+ if (i != 0) {
+ __ B(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ Bind(&load_in_object_property);
+ __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
+ __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Load property array property.
+ __ Bind(&property_array_property);
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1, scratch1, scratch2);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it exists.
+ __ Bind(&probe_dictionary);
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+ // Load the property.
+ GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1, scratch1, scratch2);
+ __ Ret();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, check_name, index_smi, index_name;
+
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &check_name);
+ __ Bind(&index_smi);
+  // Now the key is known to be a smi. Execution also jumps back here from
+  // below, where a numeric string key is converted to a smi.
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ // Slow case, key and receiver still in x0 and x1.
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ Bind(&check_name);
+ GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
+
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
+
+ __ Bind(&index_name);
+ __ IndexFromHash(x3, key);
+ // Now jump to the place where smi keys are handled.
+ __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key (index)
+ // -- x1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register index = x0;
+ Register receiver = x1;
+ Register result = x0;
+ Register scratch = x3;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow;
+ Register key = x0;
+ Register receiver = x1;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Get the map of the receiver.
+ Register map = x2;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
+ ASSERT(kSlowCaseBitFieldMask ==
+ ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
+ __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
+ masm->isolate()),
+ 2,
+ 1);
+
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(x2, x1, x0);
+
+ // Push PropertyAttributes(NONE) and strict_mode for runtime call.
+ STATIC_ASSERT(NONE == 0);
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(xzr, x10);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ ASSERT(!AreAliased(
+ value, key, receiver, receiver_map, elements_map, elements, x10, x11));
+
+ Label transition_smi_elements;
+ Label transition_double_elements;
+ Label fast_double_without_map_check;
+ Label non_double_value;
+ Label finish_store;
+
+ __ Bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because there
+ // may be a callback on the element.
+ Label holecheck_passed;
+ __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+  __ Bind(&holecheck_passed);
+
+ // Smi stores don't require further checks.
+ __ JumpIfSmi(value, &finish_store);
+
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+ __ Bind(&finish_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+
+ Register address = x11;
+ __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Str(value, MemOperand(address));
+
+ Label dont_record_write;
+ __ JumpIfSmi(value, &dont_record_write);
+
+ // Update write barrier for the elements array address.
+ __ Mov(x10, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ x10,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ __ Bind(&dont_record_write);
+ __ Ret();
+
+
+ __ Bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so go to
+ // the runtime.
+ __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+ __ Bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ elements,
+ x10,
+ d0,
+ d1,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+
+ __ Bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&fast_double_without_map_check);
+
+ __ Bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+
+ __ Bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ x10,
+ x11,
+ slow);
+ ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : key
+ // -- x2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow;
+ Label array;
+ Label fast_object;
+ Label extra;
+ Label fast_object_grow;
+ Label fast_double_grow;
+ Label fast_double;
+
+ Register value = x0;
+ Register key = x1;
+ Register receiver = x2;
+ Register receiver_map = x3;
+ Register elements = x4;
+ Register elements_map = x5;
+
+ __ JumpIfNotSmi(key, &slow);
+ __ JumpIfSmi(receiver, &slow);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+ // Check if the object is a JS array or not.
+ Register instance_type = x10;
+ __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+ __ B(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+ __ B(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(hi, &fast_object);
+
+
+ __ Bind(&slow);
+ // Slow case, handle jump to runtime.
+ // Live values:
+ // x0: value
+ // x1: key
+ // x2: receiver
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+ __ Bind(&extra);
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(ls, &slow);
+
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(eq, &fast_object_grow);
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ B(eq, &fast_double_grow);
+ __ B(&slow);
+
+
+ __ Bind(&array);
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(eq, &extra); // We can handle the case where we are appending 1 element.
+ __ B(lo, &slow);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, x1, x2, x3, x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ // Tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+ Register value = x0;
+ Register receiver = x1;
+ Register name = x2;
+ Register dictionary = x3;
+
+ GenerateNameDictionaryReceiverCheck(
+ masm, receiver, dictionary, x4, x5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+ // ----------- S t a t e -------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ __ Push(x1, x2, x0);
+
+ __ Mov(x11, Smi::FromInt(NONE)); // PropertyAttributes
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x11, x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, name and value for runtime call.
+ __ Push(x1, x2, x0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return al;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+
+ InstructionSequence* patch_info = InstructionSequence::At(info_address);
+ return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The patch information is encoded in the instruction stream using
+ // instructions which have no side effects, so we can safely execute them.
+ // The patch information is encoded directly after the call to the helper
+ // function which is requesting this patch operation.
+ Address info_address =
+ Assembler::return_address_from_call_start(address);
+ InlineSmiCheckInfo info(info_address);
+
+ // Check and decode the patch information instruction.
+ if (!info.HasSmiCheck()) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
+ address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ }
+
+ // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+ // and JumpPatchSite::EmitJumpIfSmi().
+ // Changing
+ // tb(n)z xzr, #0, <target>
+ // to
+ // tb(!n)z test_reg, #0, <target>
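+  // The unpatched form tests xzr: bit 0 of xzr is always clear, so
+  // "tbz xzr, #0, <target>" always branches and "tbnz xzr, #0, <target>" never
+  // does, which is how the disabled check behaves. Patching swaps tbz and tbnz
+  // and substitutes the register that actually holds the value, so the branch
+  // then depends on the real smi tag bit.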
+ Instruction* to_patch = info.SmiCheck();
+ PatchingAssembler patcher(to_patch, 1);
+ ASSERT(to_patch->IsTestBranch());
+ ASSERT(to_patch->ImmTestBranchBit5() == 0);
+ ASSERT(to_patch->ImmTestBranchBit40() == 0);
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+
+ int branch_imm = to_patch->ImmTestBranch();
+ Register smi_reg;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ ASSERT(to_patch->Rt() == xzr.code());
+ smi_reg = info.SmiRegister();
+ } else {
+ ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+ ASSERT(to_patch->Rt() != xzr.code());
+ smi_reg = xzr;
+ }
+
+ if (to_patch->Mask(TestBranchMask) == TBZ) {
+ // This is JumpIfNotSmi(smi_reg, branch_imm).
+ patcher.tbnz(smi_reg, 0, branch_imm);
+ } else {
+ ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
+ // This is JumpIfSmi(smi_reg, branch_imm).
+ patcher.tbz(smi_reg, 0, branch_imm);
+ }
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
new file mode 100644
index 000000000..4d1428a15
--- /dev/null
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -0,0 +1,333 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#define ARM64_DEFINE_FP_STATICS
+
+#include "arm64/instructions-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_s:
+ case LDR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_s:
+ case STR_d: return true;
+ default: return false;
+ }
+ }
+}
+
+
+static uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ ASSERT(width <= 64);
+ rotate &= 63;
+ return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
+ (value >> rotate);
+}
+
+
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+ uint64_t result = value & ((1UL << width) - 1UL);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically when the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() {
+ unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
+ int64_t n = BitN();
+ int64_t imm_s = ImmSetBits();
+ int64_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
+ //
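+  // For example, N=0, imm_s=0b000011 and imm_r=0b000001 select a 32-bit
+  // pattern with the low four bits set (0x0000000f); rotating right by one
+  // gives 0x80000007, which is replicated to 0x8000000780000007 for an
+  // X-sized register (or left as 0x80000007 for a W-sized one).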
+
+ if (n == 1) {
+ if (imm_s == 0x3F) {
+ return 0;
+ }
+ uint64_t bits = (1UL << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1F) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+float Instruction::ImmFP32() {
+ // ImmFP: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
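+  // For example, an ImmFP field of 0x70 (a=0, b=1, cdefgh=110000) expands to
+  // 0x3f800000, which is the raw encoding of 1.0f.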
+ uint32_t bits = ImmFP();
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return rawbits_to_float(result);
+}
+
+
+double Instruction::ImmFP64() {
+ // ImmFP: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = ImmFP();
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return rawbits_to_double(result);
+}
+
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
+ switch (op) {
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return LSDoubleWord;
+ default: return LSWord;
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCOffset() {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // PC-relative addressing. Only ADR is supported.
+ offset = ImmPCRel();
+ } else if (BranchType() != UnknownBranchType) {
+ // All PC-relative branches.
+ // Relative branch offsets are instruction-size-aligned.
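+    // For example, an unconditional B with ImmBranch() == 3 yields an offset
+    // of 3 << 2 = 12 bytes (three instructions forward).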
+ offset = ImmBranch() << kInstructionSizeLog2;
+ } else {
+ // Load literal (offset from PC).
+ ASSERT(IsLdrLiteral());
+    // The offset is always shifted by 2 bits, even for loads to 64-bit
+    // registers.
+ offset = ImmLLiteral() << kInstructionSizeLog2;
+ }
+ return offset;
+}
+
+
+Instruction* Instruction::ImmPCOffsetTarget() {
+ return InstructionAtOffset(ImmPCOffset());
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int32_t offset) {
+ return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+}
+
+
+bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
+ return IsValidImmPCOffset(BranchType(), DistanceTo(target));
+}
+
+
+void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else if (BranchType() != UnknownBranchType) {
+ SetBranchImmTarget(target);
+ } else {
+ SetImmLLiteral(target);
+ }
+}
+
+                                         Label* slow) {
+void Instruction::SetPCRelImmTarget(Instruction* target) {
+ // ADRP is not supported, so 'this' must point to an ADR instruction.
+ ASSERT(Mask(PCRelAddressingMask) == ADR);
+
+ Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target));
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(Instruction* target) {
+ ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(Instruction* source) {
+ ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
+ ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(offset);
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+bool InstructionSequence::IsInlineData() const {
+ // Inline data is encoded as a single movz instruction which writes to xzr
+ // (x31).
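+  // A single movz can therefore carry at most 16 bits of payload (the
+  // ImmMoveWide field).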
+ return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+}
+
+
+// TODO(jbramley): We can't put this inline in the class because things like
+// xzr and Register are not defined in that header. Consider adding
+// instructions-arm64-inl.h to work around this.
+uint64_t InstructionSequence::InlineData() const {
+ ASSERT(IsInlineData());
+ uint64_t payload = ImmMoveWide();
+ // TODO(all): If we extend ::InlineData() to support bigger data, we need
+ // to update this method too.
+ return payload;
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
new file mode 100644
index 000000000..ab64cb2bf
--- /dev/null
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -0,0 +1,501 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
+#define V8_ARM64_INSTRUCTIONS_ARM64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/constants-arm64.h"
+#include "arm64/utils-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+
+// The following macros initialize a float/double variable with a bit pattern
+// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
+// symbol is defined as uint32_t/uint64_t initialized with the desired bit
+// pattern. Otherwise, the same symbol is declared as an external float/double.
+#if defined(ARM64_DEFINE_FP_STATICS)
+#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
+#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
+#else
+#define DEFINE_FLOAT(name, value) extern const float name
+#define DEFINE_DOUBLE(name, value) extern const double name
+#endif // defined(ARM64_DEFINE_FP_STATICS)
+
+DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
+DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
+DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
+DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
+
+// This value is a signalling NaN as both a double and as a float (taking the
+// least-significant word).
+DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
+DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
+
+// A similar value, but as a quiet NaN.
+DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
+DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
+
+// The default NaN values (for FPCR.DN=1).
+DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
+DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
+
+#undef DEFINE_FLOAT
+#undef DEFINE_DOUBLE
+
+
+enum LSDataSize {
+ LSByte = 0,
+ LSHalfword = 1,
+ LSWord = 2,
+ LSDoubleWord = 3
+};
+
+LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding mode is only available when explicitly specified by the
+ // instruction (such as with fcvta). It cannot be set in FPCR.
+ FPTieAway
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ V8_INLINE Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ V8_INLINE void SetInstructionBits(Instr new_instr) {
+ *reinterpret_cast<Instr*>(this) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return signed_bitextract_32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ V8_INLINE Instruction* following(int count = 1) {
+ return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ }
+
+ V8_INLINE Instruction* preceding(int count = 1) {
+ return following(-count);
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int64_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return signed_bitextract_32(width-1, 0, offset);
+ }
+
+ uint64_t ImmLogical();
+ float ImmFP32();
+ double ImmFP64();
+
+ LSDataSize SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsLdrLiteral() const {
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsLdrLiteralX() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ // Match any loads or stores, including pairs.
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ // Match any loads, including pairs.
+ bool IsLoad() const;
+ // Match any stores, including pairs.
+ bool IsStore() const;
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use csp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into csp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use csp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ // The range of the branch instruction, expressed as 'instr +- range'.
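+  // For example, assuming the 14-bit ImmTestBranch field defined in
+  // constants-arm64.h, ImmBranchRange(TestBranchType) evaluates to
+  // (1 << 16) / 2 - 4 = 32764 bytes.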
+ static int32_t ImmBranchRange(ImmBranchType branch_type) {
+ return
+ (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
+ kInstructionSize;
+ }
+
+ int ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: UNREACHABLE();
+ }
+ return 0;
+ }
+
+ bool IsBranchAndLinkToRegister() const {
+ return Mask(UnconditionalBranchToRegisterMask) == BLR;
+ }
+
+ bool IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+ }
+
+ bool IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ bool IsNop(int n) {
+ // A marking nop is an instruction
+ // mov r<n>, r<n>
+ // which is encoded as
+ // orr r<n>, xzr, r<n>
+ return (Mask(LogicalShiftedMask) == ORR_x) &&
+ (Rd() == Rm()) &&
+ (Rd() == n);
+ }
+
+ // Find the PC offset encoded in this instruction. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ // The offset returned is unscaled.
+ ptrdiff_t ImmPCOffset();
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ Instruction* ImmPCOffsetTarget();
+
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ bool IsTargetInImmPCOffsetRange(Instruction* target);
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(Instruction* source);
+
+ uint8_t* LiteralAddress() {
+ int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ return reinterpret_cast<uint8_t*>(this) + offset;
+ }
+
+ enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
+
+ V8_INLINE Instruction* InstructionAtOffset(
+ int64_t offset,
+ CheckAlignment check = CHECK_ALIGNMENT) {
+ Address addr = reinterpret_cast<Address>(this) + offset;
+ // The FUZZ_disasm test relies on no check being done.
+ ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
+ return Cast(addr);
+ }
+
+ template<typename T> V8_INLINE static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
+ return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
+ }
+
+
+ void SetPCRelImmTarget(Instruction* target);
+ void SetBranchImmTarget(Instruction* target);
+};
+
+
+// Where Instruction looks at instructions generated by the Assembler,
+// InstructionSequence looks at instruction sequences generated by the
+// MacroAssembler.
+class InstructionSequence : public Instruction {
+ public:
+ static InstructionSequence* At(Address address) {
+ return reinterpret_cast<InstructionSequence*>(address);
+ }
+
+ // Sequences generated by MacroAssembler::InlineData().
+ bool IsInlineData() const;
+ uint64_t InlineData() const;
+};
+
+
+// Simulator/Debugger debug instructions ---------------------------------------
+// Each debug marker is represented by a HLT instruction. The immediate comment
+// field in the instruction is used to identify the type of debug marker. Each
+// marker encodes arguments in a different way, as described below.
+
+// Indicate to the Debugger that the instruction is a redirected call.
+const Instr kImmExceptionIsRedirectedCall = 0xca11;
+
+// Represent unreachable code. This is used as a guard in parts of the code that
+// should not be reachable, such as in data encoded inline in the instructions.
+const Instr kImmExceptionIsUnreachable = 0xdebf;
+
+// A pseudo 'printf' instruction. The arguments will be passed to the platform
+// printf method.
+const Instr kImmExceptionIsPrintf = 0xdeb1;
+// Parameters are stored in ARM64 registers as if the printf pseudo-instruction
+// was a call to the real printf method:
+//
+// x0: The format string, then either of:
+// x1-x7: Optional arguments.
+// d0-d7: Optional arguments.
+//
+// Floating-point and integer arguments are passed in separate sets of
+// registers in AAPCS64 (even for varargs functions), so it is not possible to
+// determine the type or location of each argument without some information
+// about the values that were passed in. This information could be retrieved
+// from the printf format string, but the format string is not trivial to
+// parse so we encode the relevant information with the HLT instruction.
+// - Type
+// Either kRegister or kFPRegister, but stored as a uint32_t because there's
+// no way to guarantee the size of the CPURegister::RegisterType enum.
+const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
+const unsigned kPrintfLength = 2 * kInstructionSize;
+
+// A pseudo 'debug' instruction.
+const Instr kImmExceptionIsDebug = 0xdeb0;
+// Parameters are inlined in the code after a debug pseudo-instruction:
+// - Debug code.
+// - Debug parameters.
+// - Debug message string. This is a NULL-terminated ASCII string, padded to
+// kInstructionSize so that subsequent instructions are correctly aligned.
+// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
+// string data.
+const unsigned kDebugCodeOffset = 1 * kInstructionSize;
+const unsigned kDebugParamsOffset = 2 * kInstructionSize;
+const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+
+// Debug parameters.
+// Used without a TRACE_ option, the Debugger will print the arguments only
+// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
+// before every instruction for the specified LOG_ parameters.
+//
+// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
+// others that were not specified.
+//
+// For example:
+//
+// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
+// will print the registers and fp registers only once.
+//
+// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
+// starts disassembling the code.
+//
+// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
+// adds the general purpose registers to the trace.
+//
+// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
+// stops tracing the registers.
+const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
+enum DebugParameters {
+ NO_PARAM = 0,
+ BREAK = 1 << 0,
+ LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
+ LOG_REGS = 1 << 2, // Log general purpose registers.
+ LOG_FP_REGS = 1 << 3, // Log floating-point registers.
+ LOG_SYS_REGS = 1 << 4, // Log the status flags.
+ LOG_WRITE = 1 << 5, // Log any memory write.
+
+ LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
+
+ // Trace control.
+ TRACE_ENABLE = 1 << 6,
+ TRACE_DISABLE = 2 << 6,
+ TRACE_OVERRIDE = 3 << 6
+};
+
+
+} } // namespace v8::internal
+
+
+#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
new file mode 100644
index 000000000..6744707fd
--- /dev/null
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -0,0 +1,618 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm64/instrument-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ ASSERT(name != NULL);
+ strncpy(name_, name, kCounterNameMaxLength);
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
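+    // Successive reads of a Gauge therefore report per-interval counts,
+    // whereas a Cumulative counter keeps its running total across reads.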
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+typedef struct {
+ const char* name;
+ CounterType type;
+} CounterDescriptor;
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"SP Adjust", Gauge},
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stderr), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stderr.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
+ output_stream_ = stderr;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
+ counters_.push_back(counter);
+ }
+
+ DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ delete *it;
+ }
+
+ if (output_stream_ != stderr) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ fprintf(output_stream_, "%s,", (*it)->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted
+  // comment line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ std::list<Counter*>::const_iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ if (strcmp((*it)->name(), name) == 0) {
+ return *it;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ std::list<Counter*>::iterator it;
+ for (it = counters_.begin(); it != counters_.end(); it++) {
+ (*it)->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLogicalImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
+
+
+void Instrument::VisitBitfield(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
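+  // The L bit of the pair encoding distinguishes loads from stores.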
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadLiteral(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
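+  // Classify the access by opcode: store vs load, integer vs FP register.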
+ switch (instr->Mask(LoadStoreOpMask)) {
+ case STRB_w: // Fall through.
+ case STRH_w: // Fall through.
+ case STR_w: // Fall through.
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s: // Fall through.
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w: // Fall through.
+ case LDRH_w: // Fall through.
+ case LDR_w: // Fall through.
+ case LDR_x: // Fall through.
+ case LDRSB_x: // Fall through.
+ case LDRSH_x: // Fall through.
+ case LDRSW_x: // Fall through.
+ case LDRSB_w: // Fall through.
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s: // Fall through.
+ case LDR_d: load_fp_counter->Increment(); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(Instruction* instr) {
+ Update();
+ static Counter* sp_counter = GetCounter("SP Adjust");
+ static Counter* add_sub_counter = GetCounter("Add/Sub DP");
+ if (((instr->Mask(AddSubOpMask) == SUB) ||
+ (instr->Mask(AddSubOpMask) == ADD)) &&
+ (instr->Rd() == 31) && (instr->Rn() == 31)) {
+ // Count adjustments to the C stack pointer caused by V8 needing two SPs.
+ sp_counter->Increment();
+ } else {
+ add_sub_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitAddSubWithCarry(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/arm64/instrument-arm64.h
new file mode 100644
index 000000000..996cc07ac
--- /dev/null
+++ b/deps/v8/src/arm64/instrument-arm64.h
@@ -0,0 +1,107 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
+#define V8_ARM64_INSTRUMENT_ARM64_H_
+
+#include "globals.h"
+#include "utils.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/constants-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void Enable();
+ void Disable();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(Instruction* instr);
+ void InstrumentLoadStorePair(Instruction* instr);
+
+ std::list<Counter*> counters_;
+
+ FILE *output_stream_;
+ uint64_t sample_period_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_INSTRUMENT_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
new file mode 100644
index 000000000..60bf51ebb
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -0,0 +1,2576 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-allocator-inl.h"
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as temporaries and
+ // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+ // a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ ASSERT(operand->HasFixedPolicy() ||
+ operand->IsUsedAtStart());
+ }
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+ }
+}
+#endif
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < InputCount(); i++) {
+ InputAt(i)->PrintTo(stream);
+ stream->Add(" ");
+ }
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ElementsKind kind = hydrogen()->elements_kind();
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ value()->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ left()->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ right()->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+ return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ base_object()->PrintTo(stream);
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ function()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+
+ PrintOutputOperandTo(stream);
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ for (int i = 0; i < InputCount(); i++) {
+ if (i > 0) stream->Add(" ");
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
+ }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+ if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ value()->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ hydrogen()->type_literal()->ToCString().get(),
+ true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ value()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ context()->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ hydrogen()->access().PrintTo(stream);
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(String::cast(*name())->ToCString().get());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
+ case Token::SHL: return "shl-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LChunkBuilder::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
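+  // Values flagged to emit at their uses are compiled lazily, here at the
+  // use site; the operand is then tagged with the value's virtual register.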
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ operand->set_virtual_register(value->id());
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? UseConstant(value)
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result) {
+ result->set_virtual_register(current_instruction_->id());
+ instr->set_result(result);
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateResultInstruction<1>* instr, int index) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateResultInstruction<1>* instr) {
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateResultInstruction<1>* instr, Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+ !hinstr->HasObservableSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ int vreg = allocator_->GetVirtualRegister();
+ if (!allocator_->AllocationOk()) {
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ vreg = 0;
+ }
+ operand->set_virtual_register(vreg);
+ return operand;
+}
+
+
+int LPlatformChunk::GetNextSpillIndex() {
+ return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex();
+ if (kind == DOUBLE_REGISTERS) {
+ return LDoubleStackSlot::Create(index, zone());
+ } else {
+ ASSERT(kind == GENERAL_REGISTERS);
+ return LStackSlot::Create(index, zone());
+ }
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ ASSERT(operand->HasFixedPolicy());
+ return operand;
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+ ASSERT(is_unused());
+ chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+ LPhase phase("L_Building chunk", chunk_);
+ status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ // TODO(all): GetNextSpillIndex just increments a field. It has no other
+ // side effects, so we should get rid of this loop.
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex();
+ }
+ }
+
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+ for (int i = 0; i < blocks->length(); i++) {
+ DoBasicBlock(blocks->at(i));
+ if (is_aborted()) return NULL;
+ }
+ status_ = DONE;
+ return chunk_;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+ ASSERT(is_building());
+ current_block_ = block;
+
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+
+    // Only copy the environment if it is later used again.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+ (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment, it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ if (phi->HasMergedIndex()) {
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ if (block->deleted_phis()->at(i) < last_environment->length()) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+
+ // Translate hydrogen instructions to lithium ones for the current block.
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while ((current != NULL) && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+
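+  // Instructions that can be replaced with dummy uses are not compiled to
+  // real code; a dummy definition and dummy uses of their operands are
+  // emitted instead.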
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ ASSERT(!current->OperandAt(0)->IsControlInstruction());
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ if (current->OperandAt(i)->IsControlInstruction()) continue;
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
+
+ if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
+#if DEBUG
+ // Make sure that the lithium instruction has either no fixed register
+ // constraints in temps or the result OR no uses that are only used at
+ // start. If this invariant doesn't hold, the register allocator can decide
+ // to insert a split of a range immediately before the instruction due to an
+ // already allocated register needing to be used for the instruction's fixed
+ // register constraint. In this case, the register allocator won't see an
+ // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+ // the same register that is used for the use-at-start.
+ // See https://code.google.com/p/chromium/issues/detail?id=201590
+ if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ int fixed = 0;
+ int used_at_start = 0;
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->IsUsedAtStart()) ++used_at_start;
+ }
+ if (instr->Output() != NULL) {
+ if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+ }
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
+ if (operand->HasFixedPolicy()) ++fixed;
+ }
+ ASSERT(fixed == 0 || used_at_start == 0);
+ }
+#endif
+
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
+ }
+ current_instruction_ = old_current;
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator,
+ &objects_to_materialize));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr) {
+ ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+ (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+ (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+ (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+ (op == Token::BIT_XOR));
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+
+ // TODO(jbramley): Once we've implemented smi support for all arithmetic
+ // operations, these assertions should check IsTagged().
+ ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsSmiOrTagged());
+ ASSERT(right->representation().IsSmiOrTagged());
+
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left_operand = UseFixed(left, x1);
+ LOperand* right_operand = UseFixed(right, x0);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+ HBoundsCheckBaseIndexInformation* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* args = NULL;
+ LOperand* length = NULL;
+ LOperand* index = NULL;
+
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+ args = UseRegisterAtStart(instr->arguments());
+ length = UseConstant(instr->length());
+ index = UseConstant(instr->index());
+ } else {
+ args = UseRegister(instr->arguments());
+ length = UseRegisterAtStart(instr->length());
+ index = UseRegisterOrConstantAtStart(instr->index());
+ }
+
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LAddS(left, right)) :
+ DefineAsRegister(new(zone()) LAddI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return DefineAsRegister(new(zone()) LAddE(left, right));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::ADD, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseRegisterOrConstant(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+ LOperand* receiver = UseFixed(instr->receiver(), x0);
+ LOperand* length = UseFixed(instr->length(), x2);
+ LOperand* elements = UseFixed(instr->elements(), x3);
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ info()->MarkAsRequiresFrame();
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right =
+ UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ return instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LBitS(left, right)) :
+ DefineAsRegister(new(zone()) LBitI(left, right));
+ } else {
+ return DoArithmeticT(instr->op(), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ // V8 expects a label to be generated for each basic block.
+ // This is used in some places like LAllocator::IsBlockBoundary
+ // in lithium-allocator.cc
+ return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ LOperand* value = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* length = UseRegister(instr->length());
+ return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ HValue* value = instr->value();
+ Representation r = value->representation();
+ HType type = value->type();
+
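+  // Only the tagged case with a restricted (non-generic, non-empty) set of
+  // expected input types can deoptimize and needs an environment.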
+ if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+ // These representations have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ } else {
+ ASSERT(r.IsTagged());
+ if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+ type.IsHeapNumber()) {
+ // These types have simple checks that cannot deoptimize.
+ return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+ }
+
+ if (type.IsString()) {
+ // This type cannot deoptimize, but needs a scratch register.
+ return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+ }
+
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+ LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+ LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+ if (expected.IsGeneric() || expected.IsEmpty()) {
+ // The generic case cannot deoptimize because it already supports every
+ // possible input type.
+ ASSERT(needs_temps);
+ return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+ } else {
+ return AssignEnvironment(
+ new(zone()) LBranch(UseRegister(value), temp1, temp2));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+ HCallJSFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), x1);
+
+ LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+ HCallWithDescriptor* instr) {
+ const CallInterfaceDescriptor* descriptor = instr->descriptor();
+
+ LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+ ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+ ops.Add(target, zone());
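+  // The target is the first operand; the remaining operands are passed in
+  // the fixed registers named by the call descriptor.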
+ for (int i = 1; i < instr->OperandCount(); i++) {
+ LOperand* op = UseFixed(instr->OperandAt(i),
+ descriptor->GetParameterRegister(i - 1));
+ ops.Add(op, zone());
+ }
+
+ LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+ ops,
+ zone());
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* function = UseFixed(instr->function(), x1);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to CallConstructStub will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The call to ArrayConstructCode will expect the constructor to be in x1.
+ LOperand* constructor = UseFixed(instr->constructor(), x1);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Representation from = instr->from();
+ Representation to = instr->to();
+
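+  // Dispatch on the (from, to) representation pair. Conversions that can
+  // deoptimize get an environment; conversions that may allocate a heap
+  // number get a pointer map for the deferred call.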
+ if (from.IsSmi()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ from = Representation::Tagged();
+ }
+
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->value()->type().IsSmi()) {
+ return DefineSameAsFirst(new(zone()) LDummyUse(value));
+ }
+ return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+ } else {
+ ASSERT(to.IsInteger32());
+ LInstruction* res = NULL;
+
+ if (instr->value()->type().IsSmi() ||
+ instr->value()->representation().IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+ } else {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? NULL : FixedTemp(d24);
+ res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+ res = AssignEnvironment(res);
+ }
+
+ return res;
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ } else {
+ ASSERT(to.IsSmi() || to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+
+ if (instr->CanTruncateToInt32()) {
+ LTruncateDoubleToIntOrSmi* result =
+ new(zone()) LTruncateDoubleToIntOrSmi(value);
+ return DefineAsRegister(result);
+ } else {
+ LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ } else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
+ if (to.IsTagged()) {
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberTagU* result = new(zone()) LNumberTagU(value,
+ TempRegister(),
+ TempRegister());
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+ (kMaxInt == Smi::kMaxValue));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ }
+ } else if (to.IsSmi()) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else {
+ ASSERT(to.IsDouble());
+ if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+ return DefineAsRegister(
+ new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
+ } else {
+ return DefineAsRegister(
+ new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
+ }
+ }
+ }
+
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+ if (instr->CanOmitMapChecks()) {
+ // LCheckMaps does nothing in this case.
+ return new(zone()) LCheckMaps(NULL);
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+
+ if (instr->has_migration_target()) {
+ info()->MarkAsDeferredCalling();
+ LInstruction* result = new(zone()) LCheckMaps(value, temp);
+ return AssignPointerMap(AssignEnvironment(result));
+ } else {
+ return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ } else if (input_rep.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+ } else {
+ ASSERT(input_rep.IsSmiOrTagged());
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LClampTToUint8(reg,
+ TempRegister(),
+ FixedTemp(d24))));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value,
+ TempRegister(),
+ TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
+ Representation r = instr->representation();
+
+ if (r.IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+    // TODO(all): In fact the only case that we can handle more efficiently is
+    // when one of the operands is the constant 0. Currently the MacroAssembler
+    // will be able to cope with any constant by loading it into an internal
+    // scratch register. This means that if the constant is used more than
+    // once, it will be loaded multiple times. Unfortunately crankshaft already
+    // duplicates constant loads, but we should modify the code below once this
+    // issue has been addressed in crankshaft.
+ LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ return new(zone()) LCompareNumericAndBranch(left, right);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->representation().IsTagged()) {
+ return new(zone()) LCmpHoleAndBranchT(value);
+ } else {
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpHoleAndBranchD(value, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsSmi()) {
+ return DefineAsRegister(new(zone()) LConstantS);
+ } else if (r.IsInteger32()) {
+ return DefineAsRegister(new(zone()) LConstantI);
+ } else if (r.IsDouble()) {
+ return DefineAsRegister(new(zone()) LConstantD);
+ } else if (r.IsExternal()) {
+ return DefineAsRegister(new(zone()) LConstantE);
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new(zone()) LConstantT);
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LDateField* result = new(zone()) LDateField(object, instr->index());
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+ return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
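+  // The division needs an environment when it can deoptimize: a -0 result,
+  // kMinInt / -1 overflow, or a non-zero remainder when uses do not truncate.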
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+ ? NULL : TempRegister();
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else {
+ return DoArithmeticT(Token::DIV, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
+ instr->function(),
+ undefined,
+ instr->inlining_kind());
+ // Only replay binding of arguments object if it wasn't removed from graph.
+ if ((instr->arguments_var() != NULL) &&
+ instr->arguments_object()->IsLinked()) {
+ inner->Bind(instr->arguments_var(), instr->arguments_object());
+ }
+ inner->set_entry(instr);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+ HForceRepresentation* instr) {
+ // All HForceRepresentation instructions should be eliminated in the
+ // representation change phase of Hydrogen.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LInstanceOf* result = new(zone()) LInstanceOf(
+ context,
+ UseFixed(instr->left(), InstanceofStub::left()),
+ UseFixed(instr->right(), InstanceofStub::right()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), InstanceofStub::left()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+ LOperand* function = UseFixed(instr->function(), x1);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+ return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ LInstruction* pop = NULL;
+ HEnvironment* env = current_block_->last_environment();
+
+ if (env->entry()->arguments_pushed()) {
+ int argument_count = env->arguments_environment()->parameter_count();
+ pop = new(zone()) LDrop(argument_count);
+ ASSERT(instr->argument_delta() == -argument_count);
+ }
+
+ HEnvironment* outer =
+ current_block_->last_environment()->DiscardInlined(false);
+ current_block_->UpdateEnvironment(outer);
+
+ return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ LOperand* context = UseRegisterAtStart(instr->value());
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* global_object = UseFixed(instr->global_object(), x0);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ if (!instr->is_typed_elements()) {
+ if (instr->representation().IsDouble()) {
+ LOperand* temp = (!instr->key()->IsConstant() ||
+ instr->RequiresHoleCheck())
+ ? TempRegister()
+ : NULL;
+
+ LLoadKeyedFixedDouble* result =
+ new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ } else {
+ ASSERT(instr->representation().IsSmiOrTagged() ||
+ instr->representation().IsInteger32());
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedFixed* result =
+ new(zone()) LLoadKeyedFixed(elements, key, temp);
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
+ }
+ } else {
+ ASSERT((instr->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+ LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ LLoadKeyedExternal* result =
+ new(zone()) LLoadKeyedExternal(elements, key, temp);
+ // An unsigned int array load might overflow and cause a deopt. Make sure it
+ // has an environment.
+ if (instr->RequiresHoleCheck() ||
+ elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* key = UseFixed(instr->key(), x0);
+
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+ LOperand* map = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* remainder = TempRegister();
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder));
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoFlooringDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+ LOperand* left = NULL;
+ LOperand* right = NULL;
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ left = UseRegisterAtStart(instr->left());
+ right = UseRegisterAtStart(instr->right());
+ }
+ return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp = TempRegister();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor, temp));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->RightIsPowerOf2()) {
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
+ } else {
+ return DoModI(instr);
+ }
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
+ } else {
+ return DoArithmeticT(Token::MOD, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool needs_environment = can_overflow || bailout_on_minus_zero;
+
+ HValue* least_const = instr->BetterLeftOperand();
+ HValue* most_const = instr->BetterRightOperand();
+
+ LOperand* left;
+
+ // LMulConstI can handle a subset of constants:
+ // With support for overflow detection:
+ // -1, 0, 1, 2
+ // 2^n, -(2^n)
+ // Without support for overflow detection:
+ // 2^n + 1, -(2^n - 1)
+ if (most_const->IsConstant()) {
+ int32_t constant = HConstant::cast(most_const)->Integer32Value();
+ bool small_constant = (constant >= -1) && (constant <= 2);
+ bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt);
+ int32_t constant_abs = Abs(constant);
+
+ if (!end_range_constant &&
+ (small_constant ||
+ (IsPowerOf2(constant_abs)) ||
+ (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
+ IsPowerOf2(constant_abs - 1))))) {
+ LConstantOperand* right = UseConstant(most_const);
+ bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+ left = need_register ? UseRegister(least_const)
+ : UseRegisterAtStart(least_const);
+ LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
+ if (needs_environment) AssignEnvironment(mul);
+ return DefineAsRegister(mul);
+ }
+ }
+
+ left = UseRegisterAtStart(least_const);
+ // LMulI/S can handle all cases, but it requires that a register is
+ // allocated for the second operand.
+ LInstruction* result;
+ if (instr->representation().IsSmi()) {
+ LOperand* right = UseRegisterAtStart(most_const);
+ result = DefineAsRegister(new(zone()) LMulS(left, right));
+ } else {
+ LOperand* right = UseRegisterAtStart(most_const);
+ result = DefineAsRegister(new(zone()) LMulI(left, right));
+ }
+ if (needs_environment) AssignEnvironment(result);
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ return DoArithmeticT(Token::MUL, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk_->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ int index = static_cast<int>(instr->index());
+ Register reg = descriptor->GetParameterRegister(index);
+ return DefineFixed(result, reg);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = exponent_type.IsInteger32()
+ ? UseFixed(instr->right(), x12)
+ : exponent_type.IsDouble()
+ ? UseFixedDouble(instr->right(), d1)
+ : UseFixed(instr->right(), x11);
+ LPower* result = new(zone()) LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ LOperand* argument = UseRegister(instr->argument());
+ return new(zone()) LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ LOperand* temp = TempRegister();
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
+ LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+ return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+ parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ LOperand* temp = TempRegister();
+ LSeqStringGetChar* result =
+ new(zone()) LSeqStringGetChar(string, index, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegister(instr->index())
+ : UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ LOperand* temp = TempRegister();
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ return DoArithmeticT(op, instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32() ||
+ instr->representation().IsSmi());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LOperand* left = instr->representation().IsSmi()
+ ? UseRegister(instr->left())
+ : UseRegisterAtStart(instr->left());
+
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ LOperand* temp = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ right = UseConstant(right_value);
+ HConstant* constant = HConstant::cast(right_value);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseRegisterAtStart(right_value);
+ if (op == Token::ROR) {
+ temp = TempRegister();
+ }
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift by 0 and the
+ // result cannot be truncated to int32.
+ bool does_deopt = false;
+ if ((op == Token::SHR) && (constant_value == 0)) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result;
+ if (instr->representation().IsInteger32()) {
+ result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ } else {
+ ASSERT(instr->representation().IsSmi());
+ result = DefineAsRegister(
+ new(zone()) LShiftS(op, left, right, temp, does_deopt));
+ }
+
+ return does_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+ LOperand* function = UseRegister(instr->function());
+ LOperand* code_object = UseRegisterAtStart(instr->code_object());
+ LOperand* temp = TempRegister();
+ return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ LOperand* temp = TempRegister();
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ // TODO(all): Replace these constraints when RecordWriteStub has been
+ // rewritten.
+ context = UseRegisterAndClobber(instr->context());
+ value = UseRegisterAndClobber(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+ LOperand* value = UseRegister(instr->value());
+ if (instr->RequiresHoleCheck()) {
+ return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
+ TempRegister(),
+ TempRegister()));
+ } else {
+ return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ LOperand* temp = NULL;
+ LOperand* elements = NULL;
+ LOperand* val = NULL;
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ if (!instr->is_typed_elements() &&
+ instr->value()->representation().IsTagged() &&
+ instr->NeedsWriteBarrier()) {
+ // RecordWrite() will clobber all registers.
+ elements = UseRegisterAndClobber(instr->elements());
+ val = UseRegisterAndClobber(instr->value());
+ temp = TempRegister();
+ } else {
+ elements = UseRegister(instr->elements());
+ val = UseRegister(instr->value());
+ temp = instr->key()->IsConstant() ? NULL : TempRegister();
+ }
+
+ if (instr->is_typed_elements()) {
+ ASSERT((instr->value()->representation().IsInteger32() &&
+ !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsDouble() &&
+ IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ ASSERT((instr->is_fixed_typed_array() &&
+ instr->elements()->representation().IsTagged()) ||
+ (instr->is_external() &&
+ instr->elements()->representation().IsExternal()));
+ return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+
+ } else if (instr->value()->representation().IsDouble()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+ } else {
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+ instr->value()->representation().IsInteger32());
+ return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x2);
+ LOperand* key = UseFixed(instr->key(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ // TODO(jbramley): It might be beneficial to allow value to be a constant in
+ // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+ LOperand* object = UseRegister(instr->object());
+ LOperand* value;
+ LOperand* temp0 = NULL;
+ LOperand* temp1 = NULL;
+
+ if (instr->access().IsExternalMemory() ||
+ instr->field_representation().IsDouble()) {
+ value = UseRegister(instr->value());
+ } else if (instr->NeedsWriteBarrier()) {
+ value = UseRegisterAndClobber(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else if (instr->NeedsWriteBarrierForMap()) {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ temp1 = TempRegister();
+ } else {
+ value = UseRegister(instr->value());
+ temp0 = TempRegister();
+ }
+
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(object, value, temp0, temp1);
+ if (instr->field_representation().IsHeapObject() &&
+ !instr->value()->type().IsHeapObject()) {
+ return AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* object = UseFixed(instr->object(), x1);
+ LOperand* value = UseFixed(instr->value(), x0);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+
+ LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegisterAndClobber(instr->string());
+ LOperand* index = UseRegisterAndClobber(instr->index());
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+ LOperand* char_code = UseRegister(instr->value());
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = UseFixed(instr->left(), x1);
+ LOperand* right = UseFixed(instr->right(), x0);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(context, left, right);
+ return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left;
+ if (instr->left()->IsConstant() &&
+ (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+ left = UseConstant(instr->left());
+ } else {
+ left = UseRegisterAtStart(instr->left());
+ }
+ LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+ LInstruction* result = instr->representation().IsSmi() ?
+ DefineAsRegister(new(zone()) LSubS(left, right)) :
+ DefineAsRegister(new(zone()) LSubI(left, right));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ return DoArithmeticT(Token::SUB, instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ if (instr->HasNoUses()) {
+ return NULL;
+ } else {
+ return DefineAsRegister(new(zone()) LThisFunction);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+ LOperand* object = UseFixed(instr->value(), x0);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+ HTransitionElementsKind* instr) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL,
+ TempRegister(), TempRegister());
+ return result;
+ } else {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, TempRegister());
+ return AssignPointerMap(result);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+ return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // TODO(jbramley): In ARM, this uses UseFixed to force the input to x0.
+ // However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
+ // anyway, so the input doesn't have to be in x0. We might be able to improve
+ // the ARM back-end a little by relaxing this restriction.
+ LTypeof* result =
+ new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ // We only need temp registers in some cases, but we can't dereference the
+ // instr->type_literal() handle to test that here.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ return new(zone()) LTypeofIsAndBranch(
+ UseRegister(instr->value()), temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ switch (instr->op()) {
+ case kMathAbs: {
+ Representation r = instr->representation();
+ if (r.IsTagged()) {
+ // The tagged case might need to allocate a HeapNumber for the result,
+ // so it is handled by a separate LInstruction.
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathAbsTagged* result =
+ new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ if (r.IsDouble()) {
+ // The Double case can never fail so it doesn't need an environment.
+ return DefineAsRegister(result);
+ } else {
+ ASSERT(r.IsInteger32() || r.IsSmi());
+ // The Integer32 and Smi cases need an environment because they can
+          // deoptimize on the minimum representable number.
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ }
+ }
+ case kMathExp: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ // TODO(all): Implement TempFPRegister.
+ LOperand* double_temp1 = FixedTemp(d24); // This was chosen arbitrarily.
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+ temp1, temp2, temp3);
+ return DefineAsRegister(result);
+ }
+ case kMathFloor: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): ARM64 can easily handle a double argument with frintm,
+ // but we're never asked for it here. At the moment, we fall back to the
+ // runtime if the result doesn't fit, like the other architectures.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ }
+ case kMathLog: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
+ case kMathPowHalf: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegister(instr->value());
+ return DefineAsRegister(new(zone()) LMathPowHalf(input));
+ }
+ case kMathRound: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsDouble());
+ // TODO(jbramley): As with kMathFloor, we can probably handle double
+ // results fairly easily, but we are never asked for them.
+ LOperand* input = UseRegister(instr->value());
+      LOperand* temp = FixedTemp(d24); // Chosen arbitrarily.
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+ case kMathSqrt: {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathSqrt(input));
+ }
+ case kMathClz32: {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->value()->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LMathClz32(input));
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk_->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
+ }
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ // Assign object to a fixed register different from those already used in
+ // LForInPrepareMap.
+ LOperand* object = UseFixed(instr->enumerable(), x0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegister(instr->map());
+ LOperand* temp = TempRegister();
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LOperand* index = UseRegister(instr->index());
+ return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegister(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
new file mode 100644
index 000000000..da3c5f17b
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -0,0 +1,3100 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_ARM64_H_
+#define V8_ARM64_LITHIUM_ARM64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddE) \
+ V(AddI) \
+ V(AddS) \
+ V(Allocate) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(BitI) \
+ V(BitS) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallFunction) \
+ V(CallJSFunction) \
+ V(CallNew) \
+ V(CallNewArray) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CallWithDescriptor) \
+ V(CheckInstanceType) \
+ V(CheckMapValue) \
+ V(CheckMaps) \
+ V(CheckNonSmi) \
+ V(CheckSmi) \
+ V(CheckValue) \
+ V(ClampDToUint8) \
+ V(ClampIToUint8) \
+ V(ClampTToUint8) \
+ V(ClassOfTestAndBranch) \
+ V(CmpHoleAndBranchD) \
+ V(CmpHoleAndBranchT) \
+ V(CmpMapAndBranch) \
+ V(CmpObjectEqAndBranch) \
+ V(CmpT) \
+ V(CompareMinusZeroAndBranch) \
+ V(CompareNumericAndBranch) \
+ V(ConstantD) \
+ V(ConstantE) \
+ V(ConstantI) \
+ V(ConstantS) \
+ V(ConstantT) \
+ V(ConstructDouble) \
+ V(Context) \
+ V(DateField) \
+ V(DebugBreak) \
+ V(DeclareGlobals) \
+ V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
+ V(DivI) \
+ V(DoubleBits) \
+ V(DoubleToIntOrSmi) \
+ V(Drop) \
+ V(Dummy) \
+ V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
+ V(FlooringDivI) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
+ V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
+ V(Goto) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
+ V(InstanceOf) \
+ V(InstanceOfKnownGlobal) \
+ V(InstructionGap) \
+ V(Integer32ToDouble) \
+ V(InvokeFunction) \
+ V(IsConstructCallAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsStringAndBranch) \
+ V(IsUndetectableAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadFieldByIndex) \
+ V(LoadFunctionPrototype) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
+ V(LoadKeyedExternal) \
+ V(LoadKeyedFixed) \
+ V(LoadKeyedFixedDouble) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadRoot) \
+ V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathAbsTagged) \
+ V(MathClz32) \
+ V(MathExp) \
+ V(MathFloor) \
+ V(MathLog) \
+ V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
+ V(ModI) \
+ V(MulConstIS) \
+ V(MulI) \
+ V(MulS) \
+ V(NumberTagD) \
+ V(NumberTagU) \
+ V(NumberUntagD) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(SeqStringGetChar) \
+ V(SeqStringSetChar) \
+ V(ShiftI) \
+ V(ShiftS) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreCodeEntry) \
+ V(StoreContextSlot) \
+ V(StoreGlobalCell) \
+ V(StoreKeyedExternal) \
+ V(StoreKeyedFixed) \
+ V(StoreKeyedFixedDouble) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringCharFromCode) \
+ V(StringCompareAndBranch) \
+ V(SubI) \
+ V(SubS) \
+ V(TaggedToI) \
+ V(ThisFunction) \
+ V(ToFastProperties) \
+ V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
+ V(TruncateDoubleToIntOrSmi) \
+ V(Typeof) \
+ V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
+ V(UnknownOSRValue) \
+ V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(this->hydrogen_value()); \
+ }
+
+
+class LInstruction : public ZoneObject {
+ public:
+ LInstruction()
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) { }
+
+ virtual ~LInstruction() { }
+
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ enum Opcode {
+ // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+ kNumberOfInstructions
+#undef DECLARE_OPCODE
+ };
+
+ virtual Opcode opcode() const = 0;
+
+ // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+ bool Is##type() const { return opcode() == k##type; }
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+ // Declare virtual predicates for instructions that don't have
+ // an opcode.
+ virtual bool IsGap() const { return false; }
+
+ virtual bool IsControl() const { return false; }
+
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
+
+ void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+ LPointerMap* pointer_map() const { return pointer_map_.get(); }
+ bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+ void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+ HValue* hydrogen_value() const { return hydrogen_value_; }
+
+ virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ bool IsMarkedAsCall() const { return IsCall(); }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() const = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
+ private:
+ class IsCallBits: public BitField<bool, 0, 1> {};
+
+ LEnvironment* environment_;
+ SetOncePointer<LPointerMap> pointer_map_;
+ HValue* hydrogen_value_;
+ int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return (R != 0) && (result() != NULL);
+ }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() const { return results_[0]; }
+
+ protected:
+ EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+};
+
+
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+ LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+ int TrueDestination(LChunk* chunk) {
+ return chunk->LookupDestination(true_block_id());
+ }
+
+ int FalseDestination(LChunk* chunk) {
+ return chunk->LookupDestination(false_block_id());
+ }
+
+ Label* TrueLabel(LChunk* chunk) {
+ if (true_label_ == NULL) {
+ true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+ }
+ return true_label_;
+ }
+
+ Label* FalseLabel(LChunk* chunk) {
+ if (false_label_ == NULL) {
+ false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+ }
+ return false_label_;
+ }
+
+ protected:
+ int true_block_id() { return SuccessorAt(0)->block_id(); }
+ int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+ DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+ Label* false_label_;
+ Label* true_label_;
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ // Can't use the DECLARE-macro here because of sub-classes.
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ static LGap* cast(LInstruction* instr) {
+ ASSERT(instr->IsGap());
+ return reinterpret_cast<LGap*>(instr);
+ }
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new(zone) LParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ private:
+ LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ HBasicBlock* block_;
+};
+
+
+class LInstructionGap V8_FINAL : public LGap {
+ public:
+ explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return !IsRedundant();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LDrop(int count) : count_(count) { }
+
+ int count() const { return count_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+ int count_;
+};
+
+
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
+
+ int block_id() const { return block_->block_id(); }
+
+ private:
+ HBasicBlock* block_;
+};
+
+
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LLabel V8_FINAL : public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ bool is_osr_entry() const { return block()->is_osr_entry(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+ Label label_;
+ LLabel* replacement_;
+};
+
+
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry() {}
+
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments,
+ LOperand* length,
+ LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddE(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LAddS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LArgumentsElements(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+ DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
+
+ LOperand* elements() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LArithmeticD(Token::Value op,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
+ : op_(op) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+ Token::Value op() const { return op_; }
+
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+ explicit LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBitS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ Token::Value op() const { return hydrogen()->op(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+ DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallJSFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
+ const Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+ DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCheckSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckValue(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo, LOperand* temp) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ temps_[0] = temp;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranchT(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+ public:
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->representation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ ExternalReference value() const {
+ return hydrogen()->ExternalReferenceValue();
+ }
+};
+
+
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
+};
+
+
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDateField(LOperand* date, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ }
+
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+ Smi* index_;
+};
+
+
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
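+// Integer division, specialized for power-of-two divisors, other constant
+// divisors and the general two-register case.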
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 1> {
+ public:
+ LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+ inputs_[0] = base_object;
+ inputs_[1] = offset;
+ }
+
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+ LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+ return lazy_deopt_env_;
+ }
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
+ lazy_deopt_env_ = env;
+ }
+
+ private:
+ LEnvironment* lazy_deopt_env_;
+};
+
+
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
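+// Call through a code object, with operands described by a
+// CallInterfaceDescriptor. The input count is not known statically, so the
+// fixed-size input/temp iteration is overridden below with a ZoneList.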
+class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+ public:
+ LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
+ ZoneList<LOperand*>& operands,
+ Zone* zone)
+ : descriptor_(descriptor),
+ inputs_(descriptor->environment_length() + 1, zone) {
+ ASSERT(descriptor->environment_length() + 1 == operands.length());
+ inputs_.AddAll(operands, zone);
+ }
+
+ LOperand* target() const { return inputs_[0]; }
+
+ const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+
+ private:
+ DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+
+ const CallInterfaceDescriptor* descriptor_;
+ ZoneList<LOperand*> inputs_;
+
+ // Iterator support.
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+ DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+ public:
+ LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
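+// Base class for keyed loads; the template parameter T is the number of
+// temporaries. Subclasses cover external, fixed and fixed-double backing
+// stores.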
+template<int T>
+class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+ public:
+ LLoadKeyed(LOperand* elements, LOperand* key) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ }
+
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+ bool is_external() const {
+ return this->hydrogen()->is_external();
+ }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ uint32_t additional_index() const {
+ return this->hydrogen()->index_offset();
+ }
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", this->additional_index());
+ } else {
+ stream->Add("]");
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
+class LLoadKeyedExternal: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
+};
+
+
+class LLoadKeyedFixed: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
+};
+
+
+class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
+ public:
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
+ LLoadKeyed<1>(elements, key) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
+};
+
+
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+};
+
+
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMapEnumLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
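+// Base class for unary math operations (abs, exp, floor, log, sqrt, ...);
+// T is the number of temporaries.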
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+ explicit LUnaryMathOperation(LOperand* value) {
+ this->inputs_[0] = value;
+ }
+
+ LOperand* value() { return this->inputs_[0]; }
+ BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
+ public:
+ LMathAbsTagged(LOperand* context, LOperand* value,
+ LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp1,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3)
+ : LUnaryMathOperation<4>(value) {
+ temps_[0] = double_temp1;
+ temps_[1] = temp1;
+ temps_[2] = temp2;
+ temps_[3] = temp3;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* double_temp1() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+ LOperand* temp2() { return temps_[2]; }
+ LOperand* temp3() { return temps_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ inputs_[1] = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ LOperand* divisor() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMathMinMax(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+ DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp1)
+ : LUnaryMathOperation<1>(value) {
+ temps_[0] = temp1;
+ }
+
+ LOperand* temp1() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+};
+
+
+class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulConstIS(LOperand* left, LConstantOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LMulS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-s")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LNumberTagU(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberUntagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+    return false;
+  }
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+ inputs_[0] = value;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* parameter_count() { return inputs_[2]; }
+
+ bool has_constant_parameter_count() {
+ return parameter_count()->IsConstantOperand();
+ }
+ LConstantOperand* constant_parameter_count() {
+ ASSERT(has_constant_parameter_count());
+ return LConstantOperand::cast(parameter_count());
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LSeqStringGetChar(LOperand* string,
+ LOperand* index,
+ LOperand* temp) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ temps_[0] = temp;
+ }
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value,
+ LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ bool needs_check() const { return needs_check_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+ bool needs_check_;
+};
+
+
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
+};
+
+
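+// Base class for keyed stores, mirroring LLoadKeyed; T is the number of
+// temporaries.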
+template<int T>
+class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+ public:
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ this->inputs_[0] = elements;
+ this->inputs_[1] = key;
+ this->inputs_[2] = value;
+ }
+
+ bool is_external() const { return this->hydrogen()->is_external(); }
+ bool is_fixed_typed_array() const {
+ return hydrogen()->is_fixed_typed_array();
+ }
+ bool is_typed_elements() const {
+ return is_external() || is_fixed_typed_array();
+ }
+ LOperand* elements() { return this->inputs_[0]; }
+ LOperand* key() { return this->inputs_[1]; }
+ LOperand* value() { return this->inputs_[2]; }
+ ElementsKind elements_kind() const {
+ return this->hydrogen()->elements_kind();
+ }
+
+ bool NeedsCanonicalization() {
+ return this->hydrogen()->NeedsCanonicalization();
+ }
+ uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+
+ void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+ this->elements()->PrintTo(stream);
+ stream->Add("[");
+ this->key()->PrintTo(stream);
+ if (this->hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", this->additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
+
+ if (this->value() == NULL) {
+ ASSERT(hydrogen()->IsConstantHoleStore() &&
+ hydrogen()->value()->representation().IsDouble());
+ stream->Add("<the hole(nan)>");
+ } else {
+ this->value()->PrintTo(stream);
+ }
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
+
+class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+ public:
+ LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* temp) :
+ LStoreKeyed<1>(elements, key, value) {
+ temps_[0] = temp;
+  }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+ "store-keyed-fixed-double")
+};
+
+
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LStoreNamedField(LOperand* object, LOperand* value,
+ LOperand* temp0, LOperand* temp1) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp0;
+ temps_[1] = temp1;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp0() { return temps_[0]; }
+ LOperand* temp1() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
+};
+
+
+class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+ LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
+ bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object,
+ LOperand* temp) {
+ inputs_[0] = function;
+ inputs_[1] = code_object;
+ temps_[0] = temp;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LSubS: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LSubS(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+ DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LToFastProperties(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+ DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+ public:
+ LTransitionElementsKind(LOperand* object,
+ LOperand* context,
+ LOperand* temp1,
+ LOperand* temp2 = NULL) {
+ inputs_[0] = object;
+ inputs_[1] = context;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+ "transition-elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
+ ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+ public:
+ LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = object;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LTruncateDoubleToIntOrSmi V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+ "truncate-double-to-int-or-smi")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+ public:
+ LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+ Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+};
+
+
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+ DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk V8_FINAL : public LChunk {
+ public:
+ LPlatformChunk(CompilationInfo* info, HGraph* graph)
+ : LChunk(info, graph) { }
+
+ int GetNextSpillIndex();
+ LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+ : LChunkBuilderBase(graph->zone()),
+ chunk_(NULL),
+ info_(info),
+ graph_(graph),
+ status_(UNUSED),
+ current_instruction_(NULL),
+ current_block_(NULL),
+ allocator_(allocator) { }
+
+ // Build the sequence for the graph.
+ LPlatformChunk* Build();
+
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+ enum Status {
+ UNUSED,
+ BUILDING,
+ DONE,
+ ABORTED
+ };
+
+ HGraph* graph() const { return graph_; }
+ Isolate* isolate() const { return info_->isolate(); }
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_building() const { return status_ == BUILDING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ int argument_count() const { return argument_count_; }
+ CompilationInfo* info() const { return info_; }
+ Heap* heap() const { return isolate()->heap(); }
+
+ void Abort(BailoutReason reason);
+
+ // Methods for getting operands for Use / Define / Temp.
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // The operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+  // The operand created by UseRegisterAndClobber is guaranteed to be live until
+  // the end of the instruction, and it may also be used as a scratch register
+  // by the instruction implementation.
+ //
+ // This behaves identically to ARM's UseTempRegister. However, it is renamed
+ // to discourage its use in ARM64, since in most cases it is better to
+ // allocate a temporary register for the Lithium instruction.
+ MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+ // The operand created by UseRegisterAtStart is guaranteed to be live only at
+ // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. temporary
+  // or output).
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // A constant operand.
+ MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+
+ // Temporary operand that must be in a fixed double register.
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+ // Methods for setting up define-use relationships.
+ // Return the same instruction that they are passed.
+ LInstruction* Define(LTemplateResultInstruction<1>* instr,
+ LUnallocated* result);
+ LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+ int index);
+
+ LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+ LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+ Register reg);
+ LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+ DoubleRegister reg);
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to this
+  // instruction.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+ LInstruction* AssignPointerMap(LInstruction* instr);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+
+ void VisitInstruction(HInstruction* current);
+ void DoBasicBlock(HBasicBlock* block);
+
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HBinaryOperation* instr);
+
+ LPlatformChunk* chunk_;
+ CompilationInfo* info_;
+ HGraph* const graph_;
+ Status status_;
+ HInstruction* current_instruction_;
+ HBasicBlock* current_block_;
+ LAllocator* allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
new file mode 100644
index 000000000..cd931e934
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -0,0 +1,5901 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "arm64/lithium-codegen-arm64.h"
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+#include "hydrogen-osr.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator V8_FINAL : public CallWrapper {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ Safepoint::DeoptMode mode)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deopt_mode_(mode) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void BeforeCall(int call_size) const { }
+
+ virtual void AfterCall() const {
+ codegen_->RecordSafepoint(pointers_, deopt_mode_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags and they must have
+// been set by some prior instructions.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+ BranchOnCondition(LCodeGen* codegen, Condition cond)
+ : BranchGenerator(codegen),
+ cond_(cond) { }
+
+ virtual void Emit(Label* label) const {
+ __ B(cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
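+    // An 'al' (always) condition has no meaningful inverse: the inverted
+    // branch would never be taken, so no code is emitted for it.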
+ if (cond_ != al) {
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+};
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+ CompareAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& lhs,
+ const Operand& rhs)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ lhs_(lhs),
+ rhs_(rhs) { }
+
+ virtual void Emit(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, cond_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
+ }
+
+ private:
+ Condition cond_;
+ const Register& lhs_;
+ const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+ TestAndBranch(LCodeGen* codegen,
+ Condition cond,
+ const Register& value,
+ uint64_t mask)
+ : BranchGenerator(codegen),
+ cond_(cond),
+ value_(value),
+ mask_(mask) { }
+
+ virtual void Emit(Label* label) const {
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(cond_, label);
+ }
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ // The inverse of "all clear" is "any set" and vice versa.
+ switch (cond_) {
+ case eq:
+ __ TestAndBranchIfAnySet(value_, mask_, label);
+ break;
+ case ne:
+ __ TestAndBranchIfAllClear(value_, mask_, label);
+ break;
+ default:
+ __ Tst(value_, mask_);
+ __ B(InvertCondition(cond_), label);
+ }
+ }
+
+ private:
+ Condition cond_;
+ const Register& value_;
+ uint64_t mask_;
+};
+
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+ BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+ const FPRegister& scratch)
+ : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+ virtual void Emit(Label* label) const {
+ __ Fabs(scratch_, value_);
+ // Compare with 0.0. Because scratch_ is positive, the result can be one of
+ // nZCv (equal), nzCv (greater) or nzCV (unordered).
+ __ Fcmp(scratch_, 0.0);
+ __ B(gt, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ Fabs(scratch_, value_);
+ __ Fcmp(scratch_, 0.0);
+ __ B(le, label);
+ }
+
+ private:
+ const FPRegister& value_;
+ const FPRegister& scratch_;
+};
+
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+ BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+ : BranchGenerator(codegen), value_(value) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfHeapNumber(value_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotHeapNumber(value_, label);
+ }
+
+ private:
+ const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+ BranchIfRoot(LCodeGen* codegen, const Register& value,
+ Heap::RootListIndex index)
+ : BranchGenerator(codegen), value_(value), index_(index) { }
+
+ virtual void Emit(Label* label) const {
+ __ JumpIfRoot(value_, index_, label);
+ }
+
+ virtual void EmitInverted(Label* label) const {
+ __ JumpIfNotRoot(value_, index_, label);
+ }
+
+ private:
+ const Register& value_;
+ const Heap::RootListIndex index_;
+};
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->translation_size();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
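+  // The frame of the function being compiled refers to its own closure with
+  // kSelfLiteralId; closures of inlined frames are registered as
+  // deoptimization literals instead.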
+ bool has_closure_id = !info()->closure().is_null() &&
+ !info()->closure().is_identical_to(environment->closure());
+ int closure_id = has_closure_id
+ ? DefineDeoptimizationLiteral(environment->closure())
+ : Translation::kSelfLiteralId;
+
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case JS_GETTER:
+ ASSERT(translation_size == 1);
+ ASSERT(height == 0);
+ translation->BeginGetterStubFrame(closure_id);
+ break;
+ case JS_SETTER:
+ ASSERT(translation_size == 2);
+ ASSERT(height == 0);
+ translation->BeginSetterStubFrame(closure_id);
+ break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ int object_index = 0;
+ int dematerialized_index = 0;
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(i),
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
+ }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
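+    // A materialization marker stands for an object that was not allocated
+    // (a captured object or an arguments object). Its field values follow
+    // the regular environment values, so translate them recursively from
+    // there.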
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
+ if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else if (is_uint32) {
+ translation->StoreUint32StackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else if (is_uint32) {
+ translation->StoreUint32Register(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ DoubleRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal, zone());
+ return result;
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode) {
+ if (!environment->HasBeenRegistered()) {
+ int frame_count = 0;
+ int jsframe_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ if (e->frame_type() == JS_FUNCTION) {
+ ++jsframe_count;
+ }
+ }
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ int pc_offset = masm()->pc_offset();
+ environment->Register(deoptimization_index,
+ translation.index(),
+ (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+ deoptimizations_.Add(environment, zone());
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ ASSERT(instr != NULL);
+
+ Assembler::BlockPoolsScope scope(masm_);
+ __ Call(code, mode);
+ RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+ if ((code->kind() == Code::BINARY_OP_IC) ||
+ (code->kind() == Code::COMPARE_IC)) {
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+ }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->function()).Is(x1));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, instr->arity());
+ // No cell in x2 for construct type feedback in optimized code.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->constructor()).is(x1));
+
+ __ Mov(x0, Operand(instr->arity()));
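+  // No cell in x2 for construct type feedback in optimized code.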
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ ElementsKind kind = instr->hydrogen()->elements_kind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (instr->arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ } else if (instr->arity() == 1) {
+ Label done;
+ if (IsFastPackedElementsKind(kind)) {
+ Label packed_case;
+
+ // We might need to create a holey array; look at the first argument.
+ __ Peek(x10, 0);
+ __ Cbz(x10, &packed_case);
+
+ ElementsKind holey_kind = GetHoleyElementsKind(kind);
+ ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ B(&done);
+ __ Bind(&packed_case);
+ }
+
+ ArraySingleArgumentConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Bind(&done);
+ } else {
+ ArrayNArgumentsConstructorStub stub(kind, override_mode);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+ }
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
+ ASSERT(instr != NULL);
+
+ __ CallRuntime(function, num_arguments, save_doubles);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Mov(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ Ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ LoadHeapObject(cp,
+ Handle<HeapObject>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+ if (position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+ masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode) {
+ if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+ RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+ } else {
+ ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ ASSERT(expected_safepoint_kind_ == kind);
+
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(
+ masm(), kind, arguments, deopt_mode);
+
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+ }
+ }
+
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp, zone());
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+ LPointerMap empty_pointers(zone());
+ RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
+ RecordSafepoint(
+ pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
+}
+
+
+bool LCodeGen::GenerateCode() {
+ LPhase phase("Z_Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to save just the callee-saved doubles? It
+ // looks like it's saving all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Poke(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ int count = 0;
+ while (!iterator.Done()) {
+ // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+ // looks like it's restoring all of them.
+ FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ __ Peek(value, count * kDoubleSize);
+ iterator.Advance();
+ count++;
+ }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ // TODO(all): Add support for stop_t FLAG in DEBUG mode.
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ if (info_->this_has_uses() &&
+ info_->strict_mode() == SLOPPY &&
+ !info_->is_native()) {
+ Label ok;
+ int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
+ __ Peek(x10, receiver_offset);
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+ __ Poke(x10, receiver_offset);
+
+ __ Bind(&ok);
+ }
+ }
+
+ ASSERT(__ StackPointer().Is(jssp));
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
+ frame_is_built_ = true;
+ info_->AddNoFrameRange(0, masm_->pc_offset());
+ }
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = GetStackSlotCount();
+ if (slots > 0) {
+ __ Claim(slots, kPointerSize);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ SaveCallerDoubles();
+ }
+
+ // Allocate a local context if needed.
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in x1.
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in x0. It replaces the context passed to us. It's
+    // saved on the stack and kept live in cp.
+ __ Mov(cp, x0);
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* var = scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ Register value = x0;
+ Register scratch = x3;
+
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ Ldr(value, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ Str(value, target);
+ // Update the write barrier. This clobbers value and scratch.
+ __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
+ GetLinkRegisterState(), kSaveFPRegs);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
+ // Trace the call.
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Claim(slots);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+ LDeferredCode* code = deferred_[i];
+
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+ Comment(";;; <@%d,#%d> "
+ "-------------------- Deferred %s --------------------",
+ code->instruction_index(),
+ code->instr()->hydrogen_value()->id(),
+ code->instr()->Mnemonic());
+
+ __ Bind(code->entry());
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Build frame");
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ Push(lr, fp, cp);
+ __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+ __ Push(fp);
+ __ Add(fp, __ StackPointer(),
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ Comment(";;; Deferred code");
+ }
+
+ code->Generate();
+
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Destroy frame");
+ ASSERT(frame_is_built_);
+ __ Pop(xzr, cp, fp, lr);
+ frame_is_built_ = false;
+ }
+
+ __ B(code->exit());
+ }
+ }
+
+ // Force constant pool emission at the end of the deferred code to make
+ // sure that no constant pools are emitted after deferred code because
+  // deferred code generation is the last step which generates code. The two
+  // following steps will only output data used by Crankshaft.
+ masm()->CheckConstPool(true, false);
+
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ if (deopt_jump_table_.length() > 0) {
+ Comment(";;; -------------------- Jump table --------------------");
+ }
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ Bind(&deopt_jump_table_[i]->label);
+ Address entry = deopt_jump_table_[i]->address;
+ Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
+ int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (deopt_jump_table_[i]->needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
+
+ UseScratchRegisterScope temps(masm());
+ Register stub_deopt_entry = temps.AcquireX();
+ Register stub_marker = temps.AcquireX();
+
+ __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
+ if (needs_frame.is_bound()) {
+ __ B(&needs_frame);
+ } else {
+ __ Bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Push(lr, fp, cp, stub_marker);
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+ __ Call(stub_deopt_entry);
+ }
+ } else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ masm()->CheckConstPool(false, false);
+ }
+
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ // We do not know how much data will be emitted for the safepoint table, so
+ // force emission of the veneer pool.
+ masm()->CheckVeneerPool(true, true);
+ safepoints_.Emit(masm(), GetStackSlotCount());
+ return !is_aborted();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(GetStackSlotCount());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+ PopulateDeoptimizationData(code);
+ info()->CommitDependencies(code);
+}
+
+
+void LCodeGen::Abort(BailoutReason reason) {
+ info()->set_bailout_reason(reason);
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+
+ Handle<DeoptimizationInputData> data =
+ factory()->NewDeoptimizationInputData(length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ { AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg, int bit,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+ Deoptimizer::BailoutType bailout_type =
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+
+ if (override_bailout_type != NULL) {
+ bailout_type = *override_bailout_type;
+ }
+
+ ASSERT(environment->HasBeenRegistered());
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ int id = environment->deoptimization_index();
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+ if (entry == NULL) {
+ Abort(kBailoutWasNotPrepared);
+ }
+
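+  // Stress-test deoptimization: decrement the stress counter and, once it
+  // reaches zero, reset it to FLAG_deopt_every_n_times and deoptimize
+  // unconditionally. The condition flags are preserved on the fall-through
+  // path.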
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Label not_zero;
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+ __ Push(x0, x1, x2);
+ __ Mrs(x2, NZCV);
+ __ Mov(x0, count);
+ __ Ldr(w1, MemOperand(x0));
+ __ Subs(x1, x1, 1);
+ __ B(gt, &not_zero);
+ __ Mov(w1, FLAG_deopt_every_n_times);
+ __ Str(w1, MemOperand(x0));
+ __ Pop(x2, x1, x0);
+ ASSERT(frame_is_built_);
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ Unreachable();
+
+ __ Bind(&not_zero);
+ __ Str(w1, MemOperand(x0));
+ __ Msr(NZCV, x2);
+ __ Pop(x2, x1, x0);
+ }
+
+ if (info()->ShouldTrapOnDeopt()) {
+ Label dont_trap;
+ __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
+ __ Debug("trap_on_deopt", __LINE__, BREAK);
+ __ Bind(&dont_trap);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to build a frame, or to restore
+  // caller doubles.
+ if (branch_type == always &&
+ frame_is_built_ && !info()->saves_caller_doubles()) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last()->address != entry) ||
+ (deopt_jump_table_.last()->bailout_type != bailout_type) ||
+ (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
+ Deoptimizer::JumpTableEntry* table_entry =
+ new(zone()) Deoptimizer::JumpTableEntry(entry,
+ bailout_type,
+ !frame_is_built_);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ B(&deopt_jump_table_.last()->label,
+ branch_type, reg, bit);
+ }
+}
+
+
+void LCodeGen::Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type) {
+ DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
+ DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+}
+
+
+void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_not_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+ int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
+ DeoptimizeIfBitSet(rt, sign_bit, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfSmi(Register rt,
+ LEnvironment* environment) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+}
+
+
+void LCodeGen::DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(eq, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment) {
+ __ CompareRoot(rt, index);
+ DeoptimizeIf(ne, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
+ LEnvironment* environment) {
+ __ TestForMinusZero(input);
+ DeoptimizeIf(vs, environment);
+}
+
+
+void LCodeGen::DeoptimizeIfBitSet(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt,
+ int bit,
+ LEnvironment* environment) {
+ DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ ASSERT((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
+ }
+ }
+ }
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ // TODO(all): support zero register results, as ToRegister32.
+ ASSERT((op != NULL) && op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+}
+
+
+Register LCodeGen::ToRegister32(LOperand* op) const {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ // If this is a constant operand, the result must be the zero register.
+ ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+ return wzr;
+ } else {
+ return ToRegister(op).W();
+ }
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return Smi::FromInt(constant->Integer32Value());
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT((op != NULL) && op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+ ASSERT(op != NULL);
+ if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(constant->Integer32Value());
+ } else if (r.IsDouble()) {
+ Abort(kToOperandUnsupportedDoubleImmediate);
+ }
+ ASSERT(r.IsTagged());
+ return Operand(constant->handle(isolate()));
+ } else if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
+ return Operand(0);
+ }
+ // Stack slots not implemented, use ToMemOperand instead.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
+Operand LCodeGen::ToOperand32I(LOperand* op) {
+ return ToOperand32(op, SIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32U(LOperand* op) {
+ return ToOperand32(op, UNSIGNED_INT32);
+}
+
+
+Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
+ ASSERT(op != NULL);
+ if (op->IsRegister()) {
+ return Operand(ToRegister32(op));
+ } else if (op->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ HConstant* constant = chunk()->LookupConstant(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(constant->HasInteger32Value());
+ return Operand(signedness == SIGNED_INT32
+ ? constant->Integer32Value()
+ : static_cast<uint32_t>(constant->Integer32Value()));
+ } else {
+ // Other constants not implemented.
+ Abort(kToOperand32UnsupportedImmediate);
+ }
+ }
+ // Other cases are not implemented.
+ UNREACHABLE();
+ return Operand(0);
+}
+
+
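+// Without an eager frame, arguments are addressed relative to the stack
+// pointer using negative slot indices: index -1 maps to offset 0, index -2
+// to kPointerSize, and so on.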
+static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+    // Retrieve the parameter without an eager stack frame, relative to the
+    // stack pointer.
+ return MemOperand(masm()->StackPointer(),
+ ArgumentsOffsetWithoutFrame(op->index()));
+ }
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+ return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ return constant->Integer32Value();
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+ HConstant* constant = chunk_->LookupConstant(op);
+ ASSERT(constant->HasDoubleValue());
+ return constant->DoubleValue();
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = nv;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = eq;
+ break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? lo : lt;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? hi : gt;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? ls : le;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? hs : ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch) {
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+
+ int next_block = GetNextEmittedBlock();
+
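+  // Prefer falling through to the next emitted block: if the true target is
+  // next, branch to the false target on the inverted condition; if the false
+  // target is next, branch only to the true target; otherwise emit both
+  // branches.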
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ } else {
+ branch.Emit(chunk_->GetAssemblyLabel(left_block));
+ __ B(chunk_->GetAssemblyLabel(right_block));
+ }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+ ASSERT((condition != al) && (condition != nv));
+ BranchOnCondition branch(this, condition);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs) {
+ ASSERT((condition != al) && (condition != nv));
+ CompareAndBranch branch(this, condition, lhs, rhs);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask) {
+ ASSERT((condition != al) && (condition != nv));
+ TestAndBranch branch(this, condition, value, mask);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch) {
+ BranchIfNonZeroNumber branch(this, value, scratch);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value) {
+ BranchIfHeapNumber branch(this, value);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index) {
+ BranchIfRoot branch(this, value, index);
+ EmitBranchGeneric(instr, branch);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) {
+ resolver_.Resolve(move);
+ }
+ }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Register arguments = ToRegister(instr->arguments());
+ Register result = ToRegister(instr->result());
+
+  // The pointer to the arguments array comes from DoArgumentsElements.
+  // It does not point directly to the arguments and there is an offset of
+  // two words that we must take into account when accessing an argument.
+  // Subtracting the index from length accounts for one, so we add one more.
+
+ if (instr->length()->IsConstantOperand() &&
+ instr->index()->IsConstantOperand()) {
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int offset = ((length - index) + 1) * kPointerSize;
+ __ Ldr(result, MemOperand(arguments, offset));
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister32(instr->length());
+ int index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = index - 1;
+ if (loc != 0) {
+ __ Sub(result.W(), length, loc);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ } else {
+ __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
+ }
+ } else {
+ Register length = ToRegister32(instr->length());
+ Operand index = ToOperand32I(instr->index());
+ __ Sub(result.W(), length, index);
+ __ Add(result.W(), result.W(), 1);
+ __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+ }
+}
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
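+  // The right operand is either an untagged int32 constant or a W register
+  // whose value is sign-extended to 64 bits.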
+ Operand right = (instr->right()->IsConstantOperand())
+ ? ToInteger32(LConstantOperand::cast(instr->right()))
+ : Operand(ToRegister32(instr->right()), SXTW);
+
+ ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+ __ Add(result, left, right);
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Adds(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
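+  // Try to allocate inline; on failure, jump to the deferred code, which
+  // calls into the runtime (see DoDeferredAllocate).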
+ DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+ }
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+ } else {
+ __ B(deferred->entry());
+ }
+ } else {
+ Register size = ToRegister32(instr->size());
+ __ Sxtw(size.X(), size);
+ __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
+ }
+
+ __ Bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ Register filler_count = temp1;
+ Register filler = temp2;
+ Register untagged_result = ToRegister(instr->temp3());
+
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Mov(filler_count, size / kPointerSize);
+ } else {
+ __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
+ }
+
+ __ Sub(untagged_result, result, kHeapObjectTag);
+ __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
+ __ FillFields(untagged_result, filler_count, filler);
+ } else {
+ ASSERT(instr->temp3() == NULL);
+ }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // We're in a SafepointRegistersScope so we can use any scratch registers.
+ Register size = x0;
+ if (instr->size()->IsConstantOperand()) {
+ __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
+ } else {
+ __ SmiTag(size, ToRegister32(instr->size()).X());
+ }
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
+ if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+ } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+ ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+ } else {
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+ }
+ __ Mov(x10, Smi::FromInt(flags));
+ __ Push(size, x10);
+
+ CallRuntimeFromDeferred(
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister32(instr->length());
+
+ Register elements = ToRegister(instr->elements());
+ Register scratch = x5;
+ ASSERT(receiver.Is(x0)); // Used for parameter count.
+ ASSERT(function.Is(x1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ Cmp(length, kArgumentsLimit);
+ DeoptimizeIf(hi, instr->environment());
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ Push(receiver);
+ Register argc = receiver;
+ receiver = NoReg;
+ __ Sxtw(argc, length);
+ // The arguments are at a one pointer size offset from elements.
+ __ Add(elements, elements, 1 * kPointerSize);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ Cbz(length, &invoke);
+ __ Bind(&loop);
+ __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
+ __ Push(scratch);
+ __ Subs(length, length, 1);
+ __ B(ne, &loop);
+
+ __ Bind(&invoke);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+ // The number of arguments is stored in argc (receiver) which is x0, as
+ // expected by InvokeFunction.
+ ParameterCount actual(argc);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->from_inlined()) {
+ // When we are inside an inlined function, the arguments are the last things
+ // that have been pushed on the stack. Therefore the arguments array can be
+ // accessed directly from jssp.
+ // However, in the normal case the arguments are accessed via fp, and there
+ // are two words on the stack between fp and the arguments (the saved lr and
+ // fp), which the LAccessArgumentsAt implementation takes into account.
+ // In the inlined case we therefore subtract the size of these two words from
+ // jssp to get a pointer which works with LAccessArgumentsAt.
+ ASSERT(masm()->StackPointer().Is(jssp));
+ __ Sub(result, jssp, 2 * kPointerSize);
+ } else {
+ ASSERT(instr->temp() != NULL);
+ Register previous_fp = ToRegister(instr->temp());
+
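+ // Check whether the caller frame is an arguments adaptor frame: if so, the
+ // arguments live in that frame; otherwise they are addressed relative to the
+ // current fp.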
+ __ Ldr(previous_fp,
+ MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ Csel(result, fp, previous_fp, ne);
+ }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister32(instr->result());
+ Label done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ __ Cmp(fp, elements);
+ __ Mov(result, scope()->num_parameters());
+ __ B(eq, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(result,
+ UntagSmiMemOperand(result.X(),
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ // Argument length is in result register.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+
+ switch (instr->op()) {
+ case Token::ADD: __ Fadd(result, left, right); break;
+ case Token::SUB: __ Fsub(result, left, right); break;
+ case Token::MUL: __ Fmul(result, left, right); break;
+ case Token::DIV: __ Fdiv(result, left, right); break;
+ case Token::MOD: {
+ // The ECMA-262 remainder operator is the remainder from a truncating
+ // (round-towards-zero) division. Note that this differs from IEEE-754.
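+ // For example, 5.5 % 2 is 1.5 (the truncated quotient is 2), whereas the
+ // IEEE-754 remainder rounds the quotient to the nearest integer (3 here)
+ // and yields -0.5.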
+ //
+ // TODO(jbramley): See if it's possible to do this inline, rather than by
+ // calling a helper function. With frintz (to produce the intermediate
+ // quotient) and fmsub (to calculate the remainder without loss of
+ // precision), it should be possible. However, we would need support for
+ // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
+ // support that yet.
+ ASSERT(left.Is(d0));
+ ASSERT(right.Is(d1));
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ ASSERT(result.Is(d0));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).is(x1));
+ ASSERT(ToRegister(instr->right()).is(x0));
+ ASSERT(ToRegister(instr->result()).is(x0));
+
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32U(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::DoBitS(LBitS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ switch (instr->op()) {
+ case Token::BIT_AND: __ And(result, left, right); break;
+ case Token::BIT_OR: __ Orr(result, left, right); break;
+ case Token::BIT_XOR: __ Eor(result, left, right); break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+ if (instr->hydrogen()->skip_check()) return;
+
+ ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+ Register length = ToRegister32(instr->length());
+
+ if (instr->index()->IsConstantOperand()) {
+ int constant_index =
+ ToInteger32(LConstantOperand::cast(instr->index()));
+
+ if (instr->hydrogen()->length()->representation().IsSmi()) {
+ __ Cmp(length, Smi::FromInt(constant_index));
+ } else {
+ __ Cmp(length, constant_index);
+ }
+ } else {
+ ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
+ __ Cmp(length, ToRegister32(instr->index()));
+ }
+ Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
+ ApplyCheckIf(condition, instr);
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+
+ if (r.IsInteger32()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+ } else if (r.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+ } else if (r.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+ } else {
+ ASSERT(r.IsTagged());
+ Register value = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+
+ if (type.IsBoolean()) {
+ ASSERT(!info()->IsStub());
+ __ CompareRoot(value, Heap::kTrueValueRootIndex);
+ EmitBranch(instr, eq);
+ } else if (type.IsSmi()) {
+ ASSERT(!info()->IsStub());
+ EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+ } else if (type.IsJSArray()) {
+ ASSERT(!info()->IsStub());
+ EmitGoto(instr->TrueDestination(chunk()));
+ } else if (type.IsHeapNumber()) {
+ ASSERT(!info()->IsStub());
+ __ Ldr(double_scratch(), FieldMemOperand(value,
+ HeapNumber::kValueOffset));
+ // Test the double value. Zero and NaN are false.
+ EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+ } else if (type.IsString()) {
+ ASSERT(!info()->IsStub());
+ Register temp = ToRegister(instr->temp1());
+ __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+ EmitCompareAndBranch(instr, ne, temp, 0);
+ } else {
+ ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+ // Avoid deopts in the case where we've never executed this path before.
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+ // undefined -> false.
+ __ JumpIfRoot(
+ value, Heap::kUndefinedValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+ // Boolean -> its value.
+ __ JumpIfRoot(
+ value, Heap::kTrueValueRootIndex, true_label);
+ __ JumpIfRoot(
+ value, Heap::kFalseValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+ // 'null' -> false.
+ __ JumpIfRoot(
+ value, Heap::kNullValueRootIndex, false_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::SMI)) {
+ // Smis: 0 -> false, all other -> true.
+ ASSERT(Smi::FromInt(0) == 0);
+ __ Cbz(value, false_label);
+ __ JumpIfSmi(value, true_label);
+ } else if (expected.NeedsMap()) {
+ // If we need a map later and have a smi, deopt.
+ DeoptimizeIfSmi(value, instr->environment());
+ }
+
+ Register map = NoReg;
+ Register scratch = NoReg;
+
+ if (expected.NeedsMap()) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ map = ToRegister(instr->temp1());
+ scratch = ToRegister(instr->temp2());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ if (expected.CanBeUndetectable()) {
+ // Undetectable -> false.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ scratch, 1 << Map::kIsUndetectable, false_label);
+ }
+ }
+
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+ // spec object -> true.
+ __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ B(ge, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::STRING)) {
+ // String value -> false iff empty.
+ Label not_string;
+ __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+ __ B(ge, &not_string);
+ __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+ __ Cbz(scratch, false_label);
+ __ B(true_label);
+ __ Bind(&not_string);
+ }
+
+ if (expected.Contains(ToBooleanStub::SYMBOL)) {
+ // Symbol value -> true.
+ __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+ __ B(eq, true_label);
+ }
+
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ Label not_heap_number;
+ __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+ __ Ldr(double_scratch(),
+ FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ Fcmp(double_scratch(), 0.0);
+ // If we got a NaN (overflow bit is set), jump to the false branch.
+ __ B(vs, false_label);
+ __ B(eq, false_label);
+ __ B(true_label);
+ __ Bind(&not_heap_number);
+ }
+
+ if (!expected.IsGeneric()) {
+ // We've seen something for the first time -> deopt.
+ // This can only happen if we are not generic already.
+ Deoptimize(instr->environment());
+ }
+ }
+ }
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg) {
+ bool dont_adapt_arguments =
+ formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ bool can_invoke_directly =
+ dont_adapt_arguments || formal_parameter_count == arity;
+
+ // The function interface relies on the following register assignments.
+ ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+ Register arity_reg = x0;
+
+ LPointerMap* pointers = instr->pointer_map();
+
+ // If necessary, load the function object.
+ if (function_reg.IsNone()) {
+ function_reg = x1;
+ __ LoadObject(function_reg, function);
+ }
+
+ if (FLAG_debug_code) {
+ Label is_not_smi;
+ // Try to confirm that function_reg (x1) is a tagged pointer.
+ __ JumpIfNotSmi(function_reg, &is_not_smi);
+ __ Abort(kExpectedFunctionObject);
+ __ Bind(&is_not_smi);
+ }
+
+ if (can_invoke_directly) {
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+ // Set the arguments count if adaptation is not needed. Assumes that x0 is
+ // available to write to at this point.
+ if (dont_adapt_arguments) {
+ __ Mov(arity_reg, arity);
+ }
+
+ // Invoke function.
+ __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ ParameterCount expected(formal_parameter_count);
+ __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->result()).Is(x0));
+
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ ASSERT(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToRegister(instr->function()).is(x1));
+
+ if (instr->hydrogen()->pass_argument_count()) {
+ __ Mov(x0, Operand(instr->arity()));
+ }
+
+ // Change context.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Load the code entry address
+ __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->result()).is(x0));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, temp);
+ }
+ DeoptimizeIfSmi(temp, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps: public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
+ if (instr->hydrogen()->CanOmitMapChecks()) {
+ ASSERT(instr->value() == NULL);
+ ASSERT(instr->temp() == NULL);
+ return;
+ }
+
+ Register object = ToRegister(instr->value());
+ Register map_reg = ToRegister(instr->temp());
+
+ __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+ __ Bind(deferred->check_maps());
+ }
+
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+ Label success;
+ for (int i = 0; i < map_set.size(); i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(map_reg, map);
+ __ B(eq, &success);
+ }
+
+ // We didn't match a map.
+ if (instr->hydrogen()->has_migration_target()) {
+ __ B(deferred->entry());
+ } else {
+ Deoptimize(instr->environment());
+ }
+
+ __ Bind(&success);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+ }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ Register value = ToRegister(instr->value());
+ ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
+ DeoptimizeIfNotSmi(value, instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ if (instr->hydrogen()->is_interval_check()) {
+ InstanceType first, last;
+ instr->hydrogen()->GetCheckInterval(&first, &last);
+
+ __ Cmp(scratch, first);
+ if (first == last) {
+ // If there is only one type in the interval check for equality.
+ DeoptimizeIf(ne, instr->environment());
+ } else if (last == LAST_TYPE) {
+ // We don't need to compare with the higher bound of the interval.
+ DeoptimizeIf(lo, instr->environment());
+ } else {
+ // If we are below the lower bound, set the C flag and clear the Z flag
+ // to force a deopt.
+ __ Ccmp(scratch, last, CFlag, hs);
+ DeoptimizeIf(hi, instr->environment());
+ }
+ } else {
+ uint8_t mask;
+ uint8_t tag;
+ instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+ if (IsPowerOf2(mask)) {
+ ASSERT((tag == 0) || (tag == mask));
+ if (tag == 0) {
+ DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+ } else {
+ DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+ }
+ } else {
+ if (tag == 0) {
+ __ Tst(scratch, mask);
+ } else {
+ __ And(scratch, scratch, mask);
+ __ Cmp(scratch, tag);
+ }
+ DeoptimizeIf(ne, instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ Register input = ToRegister32(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ __ ClampInt32ToUint8(result, input);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ Register input = ToRegister(instr->unclamped());
+ Register result = ToRegister32(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Label done;
+
+ // Both smi and heap number cases are handled.
+ Label is_not_smi;
+ __ JumpIfNotSmi(input, &is_not_smi);
+ __ SmiUntag(result.X(), input);
+ __ ClampInt32ToUint8(result);
+ __ B(&done);
+
+ __ Bind(&is_not_smi);
+
+ // Check for heap number.
+ Label is_heap_number;
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+
+ // Check for undefined. Undefined is converted to zero for the clamping
+ // conversion.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ __ Mov(result, 0);
+ __ B(&done);
+
+ // Heap number case.
+ __ Bind(&is_heap_number);
+ DoubleRegister dbl_scratch = double_scratch();
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+ __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ Fmov(result_reg, value_reg);
+ __ Mov(result_reg, Operand(result_reg, LSR, 32));
+ } else {
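+ // The S register aliases the low 32 bits of the D register, so reading the
+ // S view transfers just the low word of the double into the W result.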
+ __ Fmov(result_reg.W(), value_reg.S());
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ Register temp = ToRegister(instr->temp());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+
+ __ And(temp, lo_reg, Operand(0xffffffff));
+ __ Orr(temp, temp, Operand(hi_reg, LSL, 32));
+ __ Fmov(result_reg, temp);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register input = ToRegister(instr->value());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(input, false_label);
+
+ Register map = scratch2;
+ if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+ // We expect CompareObjectType to load the object instance type in scratch1.
+ __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, false_label);
+ __ B(eq, true_label);
+ __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
+ __ B(eq, true_label);
+ } else {
+ __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ }
+
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+ // Check if the constructor in the map is a function.
+ __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
+
+ // Objects with a non-function constructor have class 'Object'.
+ if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+ } else {
+ __ JumpIfNotObjectType(
+ scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+ }
+
+ // The constructor function is in scratch1. Get its instance class name.
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ FPRegister object = ToDoubleRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+
+ // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+ // (relatively expensive) hole-NaN check.
+ __ Fcmp(object, object);
+ __ B(vc, instr->FalseLabel(chunk_));
+
+ // We have a NaN, but is it the hole?
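+ // The hole is encoded as one specific quiet-NaN bit pattern (kHoleNanInt64),
+ // so moving the raw bits to a core register and comparing them identifies it
+ // without being confused by other NaN payloads.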
+ __ Fmov(temp, object);
+ EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+ ASSERT(instr->hydrogen()->representation().IsTagged());
+ Register object = ToRegister(instr->object());
+
+ EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp());
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+ instr->TrueLabel(chunk()));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
+ __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
+ }
+ EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ Condition cond = TokenToCondition(instr->op(), false);
+
+ if (left->IsConstantOperand() && right->IsConstantOperand()) {
+ // We can statically evaluate the comparison.
+ double left_val = ToDouble(LConstantOperand::cast(left));
+ double right_val = ToDouble(LConstantOperand::cast(right));
+ int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+ instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ EmitGoto(next_block);
+ } else {
+ if (instr->is_double()) {
+ if (right->IsConstantOperand()) {
+ __ Fcmp(ToDoubleRegister(left),
+ ToDouble(LConstantOperand::cast(right)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ __ Fcmp(ToDoubleRegister(right),
+ ToDouble(LConstantOperand::cast(left)));
+ cond = ReverseConditionForCmp(cond);
+ } else {
+ __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+ }
+
+ // If a NaN is involved, i.e. the result is unordered (V set),
+ // jump to false block label.
+ __ B(vs, instr->FalseLabel(chunk_));
+ EmitBranch(instr, cond);
+ } else {
+ if (instr->hydrogen_value()->representation().IsInteger32()) {
+ if (right->IsConstantOperand()) {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister32(left),
+ ToOperand32I(right));
+ } else {
+ // Transpose the operands and reverse the condition.
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister32(right),
+ ToOperand32I(left));
+ }
+ } else {
+ ASSERT(instr->hydrogen_value()->representation().IsSmi());
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ Operand(Smi::FromInt(value)));
+ } else if (left->IsConstantOperand()) {
+ // Transpose the operands and reverse the condition.
+ int32_t value = ToInteger32(LConstantOperand::cast(left));
+ EmitCompareAndBranch(instr,
+ ReverseConditionForCmp(cond),
+ ToRegister(right),
+ Operand(Smi::FromInt(value)));
+ } else {
+ EmitCompareAndBranch(instr,
+ cond,
+ ToRegister(left),
+ ToRegister(right));
+ }
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+ Condition cond = TokenToCondition(op, false);
+
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ // Signal that we don't inline smi code before this stub.
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ // Return true or false depending on CompareIC result.
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
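+ // The compare stub returns its result in x0 such that it compares against
+ // zero in the same way the left operand compares against the right one, so
+ // applying the token's condition to (x0 cmp 0) selects the correct boolean.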
+ __ LoadTrueFalseRoots(x1, x2);
+ __ Cmp(x0, 0);
+ __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fmov(result, instr->value());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(is_int32(instr->value()));
+ // Cast the value here to ensure that the value isn't sign extended by the
+ // implicit Operand constructor.
+ __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+ __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ Handle<Object> value = instr->value(isolate());
+ AllowDeferredHandleDereference smi_check;
+ __ LoadObject(ToRegister(instr->result()), value);
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
+ Register result = ToRegister(instr->result());
+ if (info()->IsOptimizing()) {
+ __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
+ }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Register reg = ToRegister(instr->value());
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
+ AllowDeferredHandleDereference smi_check;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireX();
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+ __ Cmp(reg, temp);
+ } else {
+ __ Cmp(reg, Operand(object));
+ }
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->date());
+ Register result = ToRegister(instr->result());
+ Register temp1 = x10;
+ Register temp2 = x11;
+ Smi* index = instr->index();
+ Label runtime, done, deopt, obj_ok;
+
+ ASSERT(object.is(result) && object.Is(x0));
+ ASSERT(instr->IsMarkedAsCall());
+
+ __ JumpIfSmi(object, &deopt);
+ __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
+ __ B(eq, &obj_ok);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&obj_ok);
+ if (index->value() == 0) {
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ Mov(temp1, Operand(stamp));
+ __ Ldr(temp1, MemOperand(temp1));
+ __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ Cmp(temp1, temp2);
+ __ B(ne, &runtime);
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ B(&done);
+ }
+
+ __ Bind(&runtime);
+ __ Mov(x1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ }
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+ type = Deoptimizer::LAZY;
+ }
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
+ Deoptimize(instr->environment(), &type);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp(dividend, 0);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ Cmp(dividend, kMinInt);
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
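+ // In both cases the mask equals |divisor| - 1 (written this way to avoid
+ // overflow for kMinInt); any of these low bits being set in the dividend
+ // means the division would have a non-zero remainder.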
+ __ Tst(dividend, mask);
+ DeoptimizeIf(ne, instr->environment());
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Neg(result, dividend);
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
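+ // An arithmetic shift alone would round negative dividends towards negative
+ // infinity. Adding (2^shift - 1), taken from the dividend's sign bits, before
+ // shifting rounds the quotient towards zero instead. For example, with
+ // divisor 4 and dividend -6: (-6 + 3) >> 2 == -1, matching trunc(-6 / 4).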
+ if (shift == 0) {
+ __ Mov(result, dividend);
+ } else if (shift == 1) {
+ __ Add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ Mov(result, Operand(dividend, ASR, 31));
+ __ Add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ Neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIfZero(dividend, instr->environment());
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, divisor);
+ __ Smsubl(temp.X(), result, temp, dividend.X());
+ DeoptimizeIfNotZero(temp, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+
+ // Issue the division first, and then check for any deopt cases whilst the
+ // result is computed.
+ __ Sdiv(result, dividend, divisor);
+
+ if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ ASSERT_EQ(NULL, instr->temp());
+ return;
+ }
+
+ Label deopt;
+ // Check for x / 0.
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ Cbz(divisor, &deopt);
+ }
+
+ // Check for (0 / -x) as that will produce negative zero.
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+
+ // If the divisor < 0 (mi), compare the dividend, and deopt if it is
+ // zero, i.e. a zero dividend with a negative divisor deopts.
+ // If the divisor >= 0 (pl, the opposite of mi), set the flags to
+ // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
+ __ Ccmp(dividend, 0, NoFlag, mi);
+ __ B(eq, &deopt);
+ }
+
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ // Test dividend for kMinInt by subtracting one (cmp) and checking for
+ // overflow.
+ __ Cmp(dividend, 1);
+ // If overflow is set, i.e. dividend == kMinInt, compare the divisor with
+ // -1. If overflow is clear, set the flags for condition ne, as the
+ // dividend isn't kMinInt, and thus we shouldn't deopt.
+ __ Ccmp(divisor, -1, NoFlag, vs);
+ __ B(eq, &deopt);
+ }
+
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = ToRegister32(instr->temp());
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbnz(remainder, &deopt);
+
+ Label div_ok;
+ __ B(&div_ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&div_ok);
+}
+
+
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister32(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ TryConvertDoubleToInt32(result, input, double_scratch());
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->tag_result()) {
+ __ SmiTag(result.X());
+ }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+ __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // FunctionLiteral instruction is marked as call, we can trash any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (!pretenure && instr->hydrogen()->has_no_literals()) {
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+ instr->hydrogen()->is_generator());
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
+ __ Mov(x1, Operand(pretenure ? factory()->true_value()
+ : factory()->false_value()));
+ __ Push(cp, x2, x1);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+ }
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ Label load_cache, done;
+
+ __ EnumLengthUntagged(result, map);
+ __ Cbnz(result, &load_cache);
+
+ __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+ __ B(&done);
+
+ __ Bind(&load_cache);
+ __ LoadInstanceDescriptors(map, result);
+ __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ DeoptimizeIfZero(result, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ Register object = ToRegister(instr->object());
+ Register null_value = x5;
+
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(object.Is(x0));
+
+ Label deopt;
+
+ __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
+
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Cmp(object, null_value);
+ __ B(eq, &deopt);
+
+ __ JumpIfSmi(object, &deopt);
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ __ B(le, &deopt);
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+ __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ B(&use_cache);
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // Get the set of properties to enumerate.
+ __ Bind(&call_runtime);
+ __ Push(object);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
+
+ __ Bind(&use_cache);
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ __ AssertString(input);
+
+ // Assert that we can use a W register load to get the hash.
+ ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+ __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+ // Do not emit jump if we are emitting a goto to the next block.
+ if (!IsNextEmittedBlock(block)) {
+ __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+ }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ EmitGoto(instr->block_id());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister32(instr->temp());
+
+ // Assert that the cache status bits fit in a W register.
+ ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+ __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+ __ Tst(temp, String::kContainsCachedArrayIndexMask);
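+ // The hash field contains a cached array index only when all of the mask
+ // bits are clear, hence the eq branch.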
+ EmitBranch(instr, eq);
+}
+
+
+// The HHasInstanceTypeAndBranch instruction is built with an interval of
+// instance types to test, but it is only used in very restricted ways. The
+// only possible kinds of intervals are:
+// - [ FIRST_TYPE, instr->to() ]
+// - [ instr->from(), LAST_TYPE ]
+// - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction,
+// provided the correct value and test condition are used.
+//
+// TestType() returns the value to use in the compare instruction and
+// BranchCondition() returns the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT((from == to) || (to == LAST_TYPE));
+ return from;
+}
+
+
+// See comment above TestType function for what this function does.
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return eq;
+ if (to == LAST_TYPE) return hs;
+ if (from == FIRST_TYPE) return ls;
+ UNREACHABLE();
+ return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+ Register result = ToRegister(instr->result());
+ Register base = ToRegister(instr->base_object());
+ if (instr->offset()->IsConstantOperand()) {
+ __ Add(result, base, ToOperand32I(instr->offset()));
+ } else {
+ __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
+ }
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // Assert that the arguments are in the registers expected by InstanceofStub.
+ ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
+ ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+
+ // InstanceofStub returns a result in x0:
+ // 0 => not an instance
+ // smi 1 => instance.
+ __ Cmp(x0, 0);
+ __ LoadTrueFalseRoots(x0, x1);
+ __ Csel(x0, x0, x1, eq);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred =
+ new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label map_check, return_false, cache_miss, done;
+ Register object = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
+
+ // This instruction is marked as call. We can clobber any register.
+ ASSERT(instr->IsMarkedAsCall());
+
+ // We must take into account that object is in x11.
+ ASSERT(object.Is(x11));
+ Register scratch = x10;
+
+ // A Smi is not instance of anything.
+ __ JumpIfSmi(object, &return_false);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+ // Below we use Factory::the_hole_value() on purpose instead of loading from
+ // the root array, to force relocation so that the value can later be patched
+ // with a custom one.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, scratch);
+ __ b(&cache_miss, ne);
+ // The address of this instruction is computed relative to the map check
+ // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ }
+ __ B(&done);
+
+ // The inlined call site cache did not match.
+ // Check null and string before calling the deferred code.
+ __ Bind(&cache_miss);
+ // Compute the address of the map check. It must not be clobbered until the
+ // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
+ // Null is not instance of anything.
+ __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
+
+ // String values are not instances of anything.
+ // Return false if the object is a string. Otherwise, jump to the deferred
+ // code.
+ // Note that we can't jump directly to deferred code from
+ // IsObjectJSStringType, because it uses tbz for the jump and the deferred
+ // code can be out of range.
+ __ IsObjectJSStringType(object, scratch, NULL, &return_false);
+ __ B(deferred->entry());
+
+ __ Bind(&return_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result is either true or false.
+ __ Bind(deferred->exit());
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+
+ // Prepare InstanceofStub arguments.
+ ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+ __ LoadObject(InstanceofStub::right(), instr->function());
+
+ InstanceofStub stub(flags);
+ CallCodeGeneric(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+
+ // Put the result value into the result register slot.
+ __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+ DoGap(instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Register value = ToRegister32(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Scvtf(result, value);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // The function is required to be in x1.
+ ASSERT(ToRegister(instr->function()).is(x1));
+ ASSERT(instr->HasPointerMap());
+
+ Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+ if (known_function.is_null()) {
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ } else {
+ CallKnownFunction(known_function,
+ instr->hydrogen()->formal_parameter_count(),
+ instr->arity(),
+ instr,
+ x1);
+ }
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ // Get the frame pointer for the calling frame.
+ __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+ __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ Bind(&check_frame_marker);
+ __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+
+ EmitCompareAndBranch(
+ instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Label* is_object = instr->TrueLabel(chunk_);
+ Label* is_not_object = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, is_not_object);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
+
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+ // Check for undetectable objects.
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
+
+ // Check that instance type is in object type range.
+ __ IsInstanceJSObjectType(map, scratch, NULL);
+ // Flags have been updated by IsInstanceJSObjectType. We can now test the
+ // flags for "le" condition to check if the object's type is a valid
+ // JS object type.
+ EmitBranch(instr, le);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string,
+ SmiCheck check_needed = INLINE_SMI_CHECK) {
+ if (check_needed == INLINE_SMI_CHECK) {
+ __ JumpIfSmi(input, is_not_string);
+ }
+ __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register val = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ Condition true_cond =
+ EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+ EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Register value = ToRegister(instr->value());
+ STATIC_ASSERT(kSmiTag == 0);
+ EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (!instr->hydrogen()->value()->IsHeapObject()) {
+ __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+ }
+ __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+ EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+static const char* LabelType(LLabel* label) {
+ if (label->is_loop_header()) return " (loop header)";
+ if (label->is_osr_entry()) return " (OSR entry)";
+ return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+ current_instruction_,
+ label->hydrogen_value()->id(),
+ label->block_id(),
+ LabelType(label));
+
+ __ Bind(label->label());
+ current_block_ = label->block_id();
+ DoGap(label);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ Label not_the_hole;
+ __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Bind(&not_the_hole);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Label deopt;
+
+ // Check that the function really is a function. Leaves map in the result
+ // register.
+ __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ __ CompareObjectType(result, temp, temp, MAP_TYPE);
+ __ B(ne, &done);
+
+ // Get the prototype from the initial map.
+ __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ __ Bind(&non_instance);
+ __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ __ B(&done);
+
+ // Deoptimize case.
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ // All done.
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+ Register result = ToRegister(instr->result());
+ __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ DeoptimizeIfRoot(
+ result, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->global_object()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Mov(x2, Operand(instr->name()));
+ ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+ Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
+ ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
+ : 0;
+
+ if (key_is_constant) {
+ int base_offset = ((constant_key + additional_index) << element_size_shift);
+ return MemOperand(base, base_offset + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (key_is_smi) {
+ // Key is smi: untag, and scale by element size.
+ __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ } else {
+ // Key is not smi, and element size is not byte: scale by element size.
+ if (additional_offset == 0) {
+ return MemOperand(base, key, SXTW, element_size_shift);
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ return MemOperand(scratch, additional_offset);
+ }
+ }
+ } else {
+ // TODO(all): Try to combine these cases a bit more intelligently.
+ if (additional_offset == 0) {
+ if (key_is_smi) {
+ __ SmiUntag(scratch, key);
+ __ Add(scratch.W(), scratch.W(), additional_index);
+ } else {
+ __ Add(scratch.W(), key.W(), additional_index);
+ }
+ return MemOperand(base, scratch, LSL, element_size_shift);
+ } else {
+ if (key_is_smi) {
+ __ Add(scratch, base,
+ Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
+ }
+ return MemOperand(
+ scratch,
+ (additional_index << element_size_shift) + additional_offset);
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ scratch = ToRegister(instr->temp());
+ key = ToRegister(instr->key());
+ }
+
+ MemOperand mem_op =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result.S(), mem_op);
+ __ Fcvt(result, result.S());
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, mem_op);
+ } else {
+ Register result = ToRegister(instr->result());
+
+ switch (elements_kind) {
+ case EXTERNAL_INT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Ldrsb(result, mem_op);
+ break;
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ __ Ldrb(result, mem_op);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ __ Ldrsh(result, mem_op);
+ break;
+ case EXTERNAL_UINT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Ldrh(result, mem_op);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ __ Ldrsw(result, mem_op);
+ break;
+ case EXTERNAL_UINT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Ldr(result.W(), mem_op);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ // Deopt if the value does not fit in an int32, i.e. if bit 31 is set.
+ __ Tst(result, 0xFFFFFFFF80000000);
+ DeoptimizeIf(ne, instr->environment());
+ }
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind) {
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced during
+ // bounds check elimination with the index argument to the bounds check, which
+ // can be tagged, so that case must be handled here, too.
+ if (key_is_tagged) {
+ __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ } else {
+ // Sign extend key because it could be a 32-bit negative value or contain
+ // garbage in the top 32-bits. The address computation happens in 64-bit.
+ ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+ (instr->temp() == NULL));
+
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = ToRegister(instr->temp());
+
+ // TODO(all): Is it faster to reload this value to an integer register, or
+ // move from fp to integer?
+ __ Fmov(scratch, result);
+ __ Cmp(scratch, kHoleNanInt64);
+ DeoptimizeIf(eq, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register load_base;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(instr->temp() == NULL);
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ load_base = elements;
+ } else {
+ load_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->representation();
+
+ if (representation.IsInteger32() &&
+ instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(load_base, offset),
+ representation);
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ DeoptimizeIfNotSmi(result, instr->environment());
+ } else {
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ }
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x1));
+ ASSERT(ToRegister(instr->key()).Is(x0));
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).Is(x0));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ HObjectAccess access = instr->hydrogen()->access();
+ int offset = access.offset();
+ Register object = ToRegister(instr->object());
+
+ if (access.IsExternalMemory()) {
+ Register result = ToRegister(instr->result());
+ __ Load(result, MemOperand(object, offset), access.representation());
+ return;
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ FPRegister result = ToDoubleRegister(instr->result());
+ __ Ldr(result, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ Register source;
+ if (access.IsInobject()) {
+ source = object;
+ } else {
+ // Load the properties array, using result as a scratch register.
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ source = result;
+ }
+
+ if (access.representation().IsSmi() &&
+ instr->hydrogen()->representation().IsInteger32()) {
+ // Read int value directly from upper half of the smi.
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
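+ // A smi stores its payload in the upper 32 bits (value << 32), so on a
+ // little-endian load the untagged int32 can be read directly from the high
+ // word of the field.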
+ __ Load(result, UntagSmiFieldMemOperand(source, offset),
+ Representation::Integer32());
+ } else {
+ __ Load(result, FieldMemOperand(source, offset), access.representation());
+ }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
+ ASSERT(ToRegister(instr->object()).is(x0));
+ __ Mov(x2, Operand(instr->name()));
+
+ Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ ASSERT(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->value());
+ __ EnumLengthSmi(result, map);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fabs(result, input);
+ } else if (r.IsSmi() || r.IsInteger32()) {
+ Register input = r.IsSmi() ? ToRegister(instr->value())
+ : ToRegister32(instr->value());
+ Register result = r.IsSmi() ? ToRegister(instr->result())
+ : ToRegister32(instr->result());
+ Label done;
+ __ Abs(result, input, NULL, &done);
+ Deoptimize(instr->environment());
+ __ Bind(&done);
+ }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry) {
+ // Handle the tricky cases of MathAbsTagged:
+ // - HeapNumber inputs.
+ // - Negative inputs produce a positive result, so a new HeapNumber is
+ // allocated to hold it.
+ // - Positive inputs are returned as-is, since there is no need to allocate
+ // a new HeapNumber for the result.
+ // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
+ // a smi. In this case, the inline code sets the result and jumps directly
+ // to the allocation_entry label.
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register input = ToRegister(instr->value());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+
+ Label runtime_allocation;
+
+ // Deoptimize if the input is not a HeapNumber.
+ __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // If the argument is positive, we can return it as-is, without any need to
+ // allocate a new HeapNumber for the result. We have to do this in integer
+ // registers (rather than with fabs) because we need to be able to distinguish
+ // the two zeroes.
+ __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ Mov(result, input);
+ __ Tbz(result_bits, kXSignBit, exit);
+
+ // Calculate abs(input) by clearing the sign bit.
+ __ Bic(result_bits, result_bits, kXSignMask);
+
+ // Allocate a new HeapNumber to hold the result.
+ // result_bits The bit representation of the (double) result.
+ __ Bind(allocation_entry);
+ __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+ // The inline (non-deferred) code will store result_bits into result.
+ __ B(exit);
+
+ __ Bind(&runtime_allocation);
+ if (FLAG_debug_code) {
+ // Because result is in the pointer map, we need to make sure it has a valid
+ // tagged value before we call the runtime. We speculatively set it to the
+ // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+ // be valid.
+ Label result_ok;
+ Register input = ToRegister(instr->value());
+ __ JumpIfSmi(result, &result_ok);
+ __ Cmp(input, result);
+ __ Assert(eq, kUnexpectedValue);
+ __ Bind(&result_ok);
+ }
+
+ { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+ instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+ }
+ // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+ // Class for deferred case.
+ class DeferredMathAbsTagged: public LDeferredCode {
+ public:
+ DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+ allocation_entry());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ Label* allocation_entry() { return &allocation; }
+ private:
+ LMathAbsTagged* instr_;
+ Label allocation;
+ };
+
+ // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+ // in GenerateDeferredCode. Tidy this up.
+ ASSERT(!NeedsDeferredFrame());
+
+ DeferredMathAbsTagged* deferred =
+ new(zone()) DeferredMathAbsTagged(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+ instr->hydrogen()->value()->representation().IsSmi());
+ Register input = ToRegister(instr->value());
+ Register result_bits = ToRegister(instr->temp3());
+ Register result = ToRegister(instr->result());
+ Label done;
+
+ // Handle smis inline.
+ // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+ // never get set by the negation. This is therefore the same as the Integer32
+ // case in DoMathAbs, except that it operates on 64-bit values.
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+
+ __ JumpIfNotSmi(input, deferred->entry());
+
+ __ Abs(result, input, NULL, &done);
+
+ // The result is the magnitude (abs) of the smallest value a smi can
+ // represent, encoded as a double.
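+ // abs(kSmiMinValue) == 2^31 == 2147483648, which is one more than
+ // kSmiMaxValue, so the result has to be boxed as a HeapNumber.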
+ __ Mov(result_bits, double_to_rawbits(0x80000000));
+ __ B(deferred->allocation_entry());
+
+ __ Bind(deferred->exit());
+ __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+ DoubleRegister double_temp2 = double_scratch();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+ Register temp3 = ToRegister(instr->temp3());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result,
+ double_temp1, double_temp2,
+ temp1, temp2, temp3);
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+ // TODO(jbramley): If we could provide a double result, we could use frintm
+ // and produce a valid double result in a single instruction.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIfMinusZero(input, instr->environment());
+ }
+
+ __ Fcvtms(result, input);
+
+ // Check that the result fits into a 32-bit integer.
+ // - The result did not overflow.
+ __ Cmp(result, Operand(result, SXTW));
+ // - The input was not NaN.
+ __ Fccmp(input, input, NoFlag, eq);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register result = ToRegister32(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
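+ // (An arithmetic right shift is a flooring division: e.g. -7 >> 2 == -2,
+ // which is floor(-7 / 4), whereas truncating division would give -1.)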
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ Mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
+
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ Negs(result, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ B(vc, &not_kmin_int);
+ __ Mov(result, kMinInt / divisor);
+ __ B(&done);
+ }
+ }
+ __ bind(&not_kmin_int);
+ __ Mov(result, Operand(dividend, ASR, shift));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ ASSERT(!AreAliased(dividend, result));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ Cmp(dividend, 0);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
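+ // For example, with dividend == -7 and divisor == 3: temp = -7 + 1 = -6,
+ // trunc(-6 / 3) = -2, and -2 - 1 = -3, which is floor(-7 / 3).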
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(temp, dividend, result));
+ Label needs_adjustment, done;
+ __ Cmp(dividend, 0);
+ __ B(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ B(&done);
+ __ bind(&needs_adjustment);
+ __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Neg(result, result);
+ __ Sub(result, result, Operand(1));
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ Register divisor = ToRegister32(instr->divisor());
+ Register remainder = ToRegister32(instr->temp());
+ Register result = ToRegister32(instr->result());
+
+ // Sdiv never traps on ARM64 (division by zero yields zero), so we can
+ // execute it speculatively before the checks below.
+ __ Sdiv(result, dividend, divisor);
+
+ // Check for x / 0.
+ DeoptimizeIfZero(divisor, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // The V flag will be set iff dividend == kMinInt.
+ __ Cmp(dividend, 1);
+ __ Ccmp(divisor, -1, NoFlag, vs);
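+ // eq is therefore set only if dividend == kMinInt and divisor == -1, the
+ // single case where the quotient overflows a 32-bit integer.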
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(divisor, 0);
+ __ Ccmp(dividend, 0, ZFlag, mi);
+ // "divisor" can't be zero here because we would already have deoptimized.
+ // The Z flag is set only if (divisor < 0) and (dividend == 0). In that case
+ // we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ // If both operands have the same sign then we are done.
+ __ Eor(remainder, dividend, divisor);
+ __ Tbz(remainder, kWSignBit, &done);
+
+ // Check if the result needs to be corrected.
+ __ Msub(remainder, result, divisor, dividend);
+ __ Cbz(remainder, &done);
+ __ Sub(result, result, 1);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+ ASSERT(instr->IsMarkedAsCall());
+ ASSERT(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+ 0, 1);
+ ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister32(instr->value());
+ Register result = ToRegister32(instr->result());
+ __ Clz(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ Label done;
+
+ // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+ // Math.pow(-Infinity, 0.5) == +Infinity
+ // Math.pow(-0.0, 0.5) == +0.0
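+ // A plain fsqrt would give fsqrt(-Infinity) == NaN and fsqrt(-0.0) == -0.0
+ // (per IEEE 754), hence the special handling below.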
+
+ // Catch -infinity inputs first.
+ // TODO(jbramley): A constant infinity register would be helpful here.
+ __ Fmov(double_scratch(), kFP64NegativeInfinity);
+ __ Fcmp(double_scratch(), input);
+ __ Fabs(result, input);
+ __ B(&done, eq);
+
+ // Add +0.0 to convert -0.0 to +0.0.
+ __ Fadd(double_scratch(), input, fp_zero);
+ __ Fsqrt(result, double_scratch());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(d1));
+ ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(x11));
+ ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d0));
+
+ if (exponent_type.IsSmi()) {
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(x11, &no_deopt);
+ __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
+ DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ __ Bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
+ // supports large integer exponents.
+ Register exponent = ToRegister(instr->right());
+ __ Sxtw(exponent, exponent);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ // TODO(jbramley): We could provide a double result here using frint.
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
+ Register result = ToRegister(instr->result());
+ Label try_rounding;
+ Label done;
+
+ // Math.round() rounds to the nearest integer, with ties going towards
+ // +infinity. This does not match any IEEE-754 rounding mode.
+ // - Infinities and NaNs are propagated unchanged, but cause deopts because
+ // they can't be represented as integers.
+ // - The sign of the result is the same as the sign of the input. This means
+ // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+ // result of -0.0.
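+ // For example: Math.round(2.5) == 3, Math.round(-2.5) == -2 and
+ // Math.round(-0.4) == -0.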
+
+ DoubleRegister dot_five = double_scratch();
+ __ Fmov(dot_five, 0.5);
+ __ Fabs(temp1, input);
+ __ Fcmp(temp1, dot_five);
+ // If input is in [-0.5, -0], the result is -0.
+ // If input is in [+0, +0.5[, the result is +0.
+ // If the input is +0.5, the result is 1.
+ __ B(hi, &try_rounding); // hi so NaN will also branch.
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Fmov(result, input);
+ DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0].
+ }
+ __ Fcmp(input, dot_five);
+ __ Mov(result, 1); // +0.5.
+ // The remaining cases return 0 (xzr): [+0, +0.5[ when kBailoutOnMinusZero
+ // is set, otherwise [-0.5, +0.5[.
+ __ Csel(result, result, xzr, eq);
+ __ B(&done);
+
+ __ Bind(&try_rounding);
+ // Since we're providing a 32-bit result, we can implement ties-to-infinity by
+ // adding 0.5 to the input, then taking the floor of the result. This does not
+ // work for very large positive doubles because adding 0.5 would cause an
+ // intermediate rounding stage, so a different approach will be necessary if a
+ // double result is needed.
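+ // For example: 2.5 + 0.5 == 3.0 and floor(3.0) == 3, while -2.5 + 0.5 == -2.0
+ // and floor(-2.0) == -2, matching ties-towards-+infinity.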
+ __ Fadd(temp1, input, dot_five);
+ __ Fcvtms(result, temp1);
+
+ // Deopt if
+ // * the input was NaN
+ // * the result is not representable using a 32-bit integer.
+ __ Fcmp(input, 0.0);
+ __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
+ DeoptimizeIf(ne, instr->environment());
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ HMathMinMax::Operation op = instr->hydrogen()->operation();
+ if (instr->hydrogen()->representation().IsInteger32()) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else if (instr->hydrogen()->representation().IsSmi()) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+
+ __ Cmp(left, right);
+ __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+ } else {
+ ASSERT(instr->hydrogen()->representation().IsDouble());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister left = ToDoubleRegister(instr->left());
+ DoubleRegister right = ToDoubleRegister(instr->right());
+
+ if (op == HMathMinMax::kMathMax) {
+ __ Fmax(result, left, right);
+ } else {
+ ASSERT(op == HMathMinMax::kMathMin);
+ __ Fmin(result, left, right);
+ }
+ }
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister32(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
+ HMod* hmod = instr->hydrogen();
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
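+ // For example, divisor == 8 or -8 gives mask == 7; for a non-negative
+ // dividend x, x % (+/-8) == x & 7, since in JS the result takes the sign of
+ // the dividend.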
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Cmp(dividend, 0);
+ __ B(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ Neg(dividend, dividend);
+ __ And(dividend, dividend, mask);
+ __ Negs(dividend, dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ B(&done);
+ }
+
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, mask);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister32(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister32(instr->result());
+ Register temp = ToRegister32(instr->temp());
+ ASSERT(!AreAliased(dividend, result, temp));
+
+ if (divisor == 0) {
+ Deoptimize(instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Sxtw(dividend.X(), dividend);
+ __ Mov(temp, Abs(divisor));
+ __ Smsubl(result.X(), result, temp, dividend.X());
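+ // result = dividend - trunc(dividend / |divisor|) * |divisor|. In JS the
+ // sign of x % d follows x and is independent of the sign of d, so using
+ // |divisor| is sufficient.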
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Cbnz(result, &remainder_not_zero);
+ DeoptimizeIfNegative(dividend, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Register dividend = ToRegister32(instr->left());
+ Register divisor = ToRegister32(instr->right());
+ Register result = ToRegister32(instr->result());
+
+ Label deopt, done;
+ // modulo = dividend - quotient * divisor
+ __ Sdiv(result, dividend, divisor);
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Combine the deoptimization sites.
+ Label ok;
+ __ Cbnz(divisor, &ok);
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+ __ Bind(&ok);
+ }
+ __ Msub(result, result, divisor, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cbnz(result, &done);
+ if (deopt.is_bound()) { // TODO(all) This is a hack, remove this...
+ __ Tbnz(dividend, kWSignBit, &deopt);
+ } else {
+ DeoptimizeIfNegative(dividend, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+ ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+ bool is_smi = instr->hydrogen()->representation().IsSmi();
+ Register result =
+ is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+ Register left =
+ is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
+ int32_t right = ToInteger32(instr->right());
+ ASSERT((right > -kMaxInt) || (right < kMaxInt));
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero) {
+ if (right < 0) {
+ // The result is -0 if right is negative and left is zero.
+ DeoptimizeIfZero(left, instr->environment());
+ } else if (right == 0) {
+ // The result is -0 if the right is zero and the left is negative.
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ }
+
+ switch (right) {
+ // Cases which can detect overflow.
+ case -1:
+ if (can_overflow) {
+ // Only kMinInt (0x80000000) can overflow when negated.
+ __ Negs(result, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, left);
+ }
+ break;
+ case 0:
+ // This case can never overflow.
+ __ Mov(result, 0);
+ break;
+ case 1:
+ // This case can never overflow.
+ __ Mov(result, left, kDiscardForSameWReg);
+ break;
+ case 2:
+ if (can_overflow) {
+ __ Adds(result, left, left);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Add(result, left, left);
+ }
+ break;
+
+ default:
+ // Multiplication by constant powers of two (and some related values)
+ // can be done efficiently with shifted operands.
+ int32_t right_abs = Abs(right);
+
+ if (IsPowerOf2(right_abs)) {
+ int right_log2 = WhichPowerOf2(right_abs);
+
+ if (can_overflow) {
+ Register scratch = result;
+ ASSERT(!AreAliased(scratch, left));
+ __ Cls(scratch, left);
+ __ Cmp(scratch, right_log2);
+ DeoptimizeIf(lt, instr->environment());
+ }
+
+ if (right >= 0) {
+ // result = left << log2(right)
+ __ Lsl(result, left, right_log2);
+ } else {
+ // result = -left << log2(-right)
+ if (can_overflow) {
+ __ Negs(result, Operand(left, LSL, right_log2));
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Neg(result, Operand(left, LSL, right_log2));
+ }
+ }
+ return;
+ }
+
+
+ // For the following cases, we could perform a conservative overflow check
+ // with CLS as above. However the few cycles saved are likely not worth
+ // the risk of deoptimizing more often than required.
+ ASSERT(!can_overflow);
+
+ if (right >= 0) {
+ if (IsPowerOf2(right - 1)) {
+ // result = left + left << log2(right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+ } else if (IsPowerOf2(right + 1)) {
+ // result = -left + left << log2(right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ if (IsPowerOf2(-right + 1)) {
+ // result = left - left << log2(-right + 1)
+ __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+ } else if (IsPowerOf2(-right - 1)) {
+ // result = -left - left << log2(-right - 1)
+ __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+ __ Neg(result, result);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Register right = ToRegister32(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ if (can_overflow) {
+ __ Smull(result.X(), left, right);
+ __ Cmp(result.X(), Operand(result, SXTW));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ Mul(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (bailout_on_minus_zero && !left.Is(right)) {
+ // If one operand is zero and the other is negative, the result is -0.
+ // - Set Z (eq) if either left or right, or both, are 0.
+ __ Cmp(left, 0);
+ __ Ccmp(right, 0, ZFlag, ne);
+ // - If so (eq), set N (mi) if left + right is negative.
+ // - Otherwise, clear N.
+ __ Ccmn(left, right, NoFlag, eq);
+ DeoptimizeIf(mi, instr->environment());
+ }
+
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+ if (can_overflow) {
+ __ Smulh(result, left, right);
+ __ Cmp(result, Operand(result.W(), SXTW));
+ __ SmiTag(result);
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ if (AreAliased(result, left, right)) {
+ // All three registers are the same: half untag the input and then
+ // multiply, giving a tagged result.
+ STATIC_ASSERT((kSmiShift % 2) == 0);
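+ // A smi holds value << 32; shifting right by 16 leaves value << 16, and
+ // squaring that gives (value * value) << 32, i.e. the smi-tagged square.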
+ __ Asr(result, left, kSmiShift / 2);
+ __ Mul(result, result, result);
+ } else if (result.Is(left) && !left.Is(right)) {
+ // Registers result and left alias, right is distinct: untag left into
+ // result, and then multiply by right, giving a tagged result.
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ ASSERT(!left.Is(result));
+ // Registers result and right alias, left is distinct, or all registers
+ // are distinct: untag right into result, and then multiply by left,
+ // giving a tagged result.
+ __ SmiUntag(result, right);
+ __ Mul(result, left, result);
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register result = ToRegister(instr->result());
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+ } else {
+ __ B(deferred->entry());
+ }
+
+ __ Bind(deferred->exit());
+ __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Label slow, convert_and_store;
+ Register src = ToRegister32(value);
+ Register dst = ToRegister(instr->result());
+ Register scratch1 = ToRegister(temp1);
+
+ if (FLAG_inline_new) {
+ Register scratch2 = ToRegister(temp2);
+ __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+ __ B(&convert_and_store);
+ }
+
+ // Slow case: call the runtime system to do the number allocation.
+ __ Bind(&slow);
+ // TODO(3095996): Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains an
+ // integer value.
+ __ Mov(dst, 0);
+ {
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagU and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, dst);
+ }
+
+ // Convert number to floating point and store in the newly allocated heap
+ // number.
+ __ Bind(&convert_and_store);
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Ucvtf(dbl_scratch, src);
+ __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+ class DeferredNumberTagU: public LDeferredCode {
+ public:
+ DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredNumberTagU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2());
+ }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LNumberTagU* instr_;
+ };
+
+ Register value = ToRegister32(instr->value());
+ Register result = ToRegister(instr->result());
+
+ DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ __ Cmp(value, Smi::kMaxValue);
+ __ B(hi, deferred->entry());
+ __ SmiTag(result, value.X());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Register input = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ bool can_convert_undefined_to_nan =
+ instr->hydrogen()->can_convert_undefined_to_nan();
+
+ Label done, load_smi;
+
+ // Work out what untag mode we're working with.
+ HValue* value = instr->hydrogen()->value();
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ __ JumpIfSmi(input, &load_smi);
+
+ Label convert_undefined;
+
+ // Heap number map check.
+ __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ if (can_convert_undefined_to_nan) {
+ __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ &convert_undefined);
+ } else {
+ DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+ }
+
+ // Load heap number.
+ __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+ if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+ DeoptimizeIfMinusZero(result, instr->environment());
+ }
+ __ B(&done);
+
+ if (can_convert_undefined_to_nan) {
+ __ Bind(&convert_undefined);
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ B(&done);
+ }
+
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ // Fall through to load_smi.
+ }
+
+ // Smi to double register conversion.
+ __ Bind(&load_smi);
+ __ SmiUntagToDouble(result, input);
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+ GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ LOperand* argument = instr->value();
+ if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
+ } else {
+ __ Push(ToRegister(argument));
+ }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace && info()->IsOptimizing()) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
+ // code managed by the register allocator and tearing down the frame, it's
+ // safe to write to the context register.
+ __ Push(x0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ if (info()->saves_caller_doubles()) {
+ RestoreCallerDoubles();
+ }
+
+ int no_frame_start = -1;
+ if (NeedsEagerFrame()) {
+ Register stack_pointer = masm()->StackPointer();
+ __ Mov(stack_pointer, fp);
+ no_frame_start = masm_->pc_offset();
+ __ Pop(fp, lr);
+ }
+
+ if (instr->has_constant_parameter_count()) {
+ int parameter_count = ToInteger32(instr->constant_parameter_count());
+ __ Drop(parameter_count + 1);
+ } else {
+ Register parameter_count = ToRegister(instr->parameter_count());
+ __ DropBySMI(parameter_count);
+ }
+ __ Ret();
+
+ if (no_frame_start != -1) {
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
+ }
+ return FieldMemOperand(temp, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ // Even though this lithium instruction comes with a temp register, we
+ // can't use it here because we want to use "AtStart" constraints on the
+ // inputs and the debug code here needs a scratch register.
+ UseScratchRegisterScope temps(masm());
+ Register dbg_temp = temps.AcquireX();
+
+ __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
+
+ __ And(dbg_temp, dbg_temp,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, kUnexpectedStringType);
+ }
+
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Ldrb(result, operand);
+ } else {
+ __ Ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+
+ if (FLAG_debug_code) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+ encoding_mask);
+ }
+ MemOperand operand =
+ BuildSeqStringOperand(string, temp, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Strb(value, operand);
+ } else {
+ __ Strh(value, operand);
+ }
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIfNegative(input.W(), instr->environment());
+ }
+ __ SmiTag(output, input);
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label done, untag;
+
+ if (instr->needs_check()) {
+ DeoptimizeIfNotSmi(input, instr->environment());
+ }
+
+ __ Bind(&untag);
+ __ SmiUntag(result, input);
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister32(instr->left());
+ Register result = ToRegister32(instr->result());
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister32(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, right); break;
+ case Token::SAR: __ Asr(result, left, right); break;
+ case Token::SHL: __ Lsl(result, left, right); break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Lsr(result, left, right);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left, kDiscardForSameWReg);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR: __ Ror(result, left, shift_count); break;
+ case Token::SAR: __ Asr(result, left, shift_count); break;
+ case Token::SHL: __ Lsl(result, left, shift_count); break;
+ case Token::SHR: __ Lsr(result, left, shift_count); break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoShiftS(LShiftS* instr) {
+ LOperand* right_op = instr->right();
+ Register left = ToRegister(instr->left());
+ Register result = ToRegister(instr->result());
+
+ // Only ROR by register needs a temp.
+ ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
+ (instr->temp() == NULL));
+
+ if (right_op->IsRegister()) {
+ Register right = ToRegister(instr->right());
+ switch (instr->op()) {
+ case Token::ROR: {
+ Register temp = ToRegister(instr->temp());
+ __ Ubfx(temp, right, kSmiShift, 5);
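+ // Ubfx extracts 5 bits starting at kSmiShift, i.e. the untagged shift
+ // amount reduced modulo 32.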
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), temp.W());
+ __ SmiTag(result);
+ break;
+ }
+ case Token::SAR:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Asr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsl(result, left, result);
+ break;
+ case Token::SHR:
+ if (instr->can_deopt()) {
+ Label right_not_zero;
+ __ Cbnz(right, &right_not_zero);
+ DeoptimizeIfNegative(left, instr->environment());
+ __ Bind(&right_not_zero);
+ }
+ __ Ubfx(result, right, kSmiShift, 5);
+ __ Lsr(result, left, result);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ } else {
+ ASSERT(right_op->IsConstantOperand());
+ int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+ if (shift_count == 0) {
+ if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+ DeoptimizeIfNegative(left, instr->environment());
+ }
+ __ Mov(result, left);
+ } else {
+ switch (instr->op()) {
+ case Token::ROR:
+ __ SmiUntag(result, left);
+ __ Ror(result.W(), result.W(), shift_count);
+ __ SmiTag(result);
+ break;
+ case Token::SAR:
+ __ Asr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ case Token::SHL:
+ __ Lsl(result, left, shift_count);
+ break;
+ case Token::SHR:
+ __ Lsr(result, left, shift_count);
+ __ Bic(result, result, kSmiShiftMask);
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ }
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+ __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Register scratch1 = x5;
+ Register scratch2 = x6;
+ ASSERT(instr->IsMarkedAsCall());
+
+ ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
+ // TODO(all): if Mov could handle object in new space then it could be used
+ // here.
+ __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
+ __ Push(cp, scratch1, scratch2); // The context is the first argument.
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+ RecordSafepointWithLazyDeopt(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStackCheck* instr_;
+ };
+
+ ASSERT(instr->HasEnvironment());
+ LEnvironment* env = instr->environment();
+ // There is no LLazyBailout instruction for stack-checks. We have to
+ // prepare for lazy deoptimization explicitly here.
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(hs, &done);
+
+ PredictableCodeSizeScope predictable(masm_,
+ Assembler::kCallSizeWithRelocation);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
+ __ Bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new(zone()) DeferredStackCheck(this, instr);
+ __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+ __ B(lo, deferred_stack_check->entry());
+
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ __ Bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+ // Don't record a deoptimization index for the safepoint here.
+ // This will be done explicitly when emitting the call and the safepoint in
+ // the deferred code.
+ }
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ Register temp = ToRegister(instr->temp());
+ __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+ __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ Ldr(scratch, target);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
+ instr->environment());
+ } else {
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+ }
+ }
+
+ __ Str(value, target);
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch,
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+ __ Bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+ Register value = ToRegister(instr->value());
+ Register cell = ToRegister(instr->temp1());
+
+ // Load the cell.
+ __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register payload = ToRegister(instr->temp2());
+ __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
+ DeoptimizeIfRoot(
+ payload, Heap::kTheHoleValueRootIndex, instr->environment());
+ }
+
+ // Store the value.
+ __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
+ // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+ Register ext_ptr = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch;
+ ElementsKind elements_kind = instr->elements_kind();
+
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ ASSERT(instr->temp() == NULL);
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
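+    // Constant keys with any of the top four bits set are too large for the
+    // element offset computation.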
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ scratch = ToRegister(instr->temp());
+ }
+
+ MemOperand dst =
+ PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+ key_is_constant, constant_key,
+ elements_kind,
+ instr->additional_index());
+
+ if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
+ (elements_kind == FLOAT32_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fcvt(dbl_scratch.S(), value);
+ __ Str(dbl_scratch.S(), dst);
+ } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
+ (elements_kind == FLOAT64_ELEMENTS)) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, dst);
+ } else {
+ Register value = ToRegister(instr->value());
+
+ switch (elements_kind) {
+ case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+ case EXTERNAL_INT8_ELEMENTS:
+ case EXTERNAL_UINT8_ELEMENTS:
+ case UINT8_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ case INT8_ELEMENTS:
+ __ Strb(value, dst);
+ break;
+ case EXTERNAL_INT16_ELEMENTS:
+ case EXTERNAL_UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT16_ELEMENTS:
+ __ Strh(value, dst);
+ break;
+ case EXTERNAL_INT32_ELEMENTS:
+ case EXTERNAL_UINT32_ELEMENTS:
+ case INT32_ELEMENTS:
+ case UINT32_ELEMENTS:
+ __ Str(value.W(), dst);
+ break;
+ case FLOAT32_ELEMENTS:
+ case FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT64_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+ Register elements = ToRegister(instr->elements());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register store_base = no_reg;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xf0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ store_base = ToRegister(instr->temp());
+ Register key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ }
+
+ if (instr->NeedsCanonicalization()) {
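+    // Fmaxnm returns the numeric operand when the other operand is NaN, so
+    // numbers pass through unchanged while NaN inputs are canonicalized
+    // before being stored.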
+ DoubleRegister dbl_scratch = double_scratch();
+ __ Fmov(dbl_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Fmaxnm(dbl_scratch, dbl_scratch, value);
+ __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
+ } else {
+ __ Str(value, FieldMemOperand(store_base, offset));
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = no_reg;
+ Register store_base = no_reg;
+ Register key = no_reg;
+ int offset = 0;
+
+ if (!instr->key()->IsConstantOperand() ||
+ instr->hydrogen()->NeedsWriteBarrier()) {
+ scratch = ToRegister(instr->temp());
+ }
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ store_base = scratch;
+ key = ToRegister(instr->key());
+ bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+ CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind());
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ Representation representation = instr->hydrogen()->value()->representation();
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
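+    // With 32-bit smis tagged in the upper word, the untagged integer can be
+    // stored directly into the value half of the already-initialized entry.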
+ __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(store_base, offset), representation);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(representation.IsTagged());
+ // This assignment may cause element_addr to alias store_base.
+ Register element_addr = scratch;
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute the address of the modified element into the scratch register.
+ __ Add(element_addr, store_base, offset - kHeapObjectTag);
+ __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
+ kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->object()).Is(x2));
+ ASSERT(ToRegister(instr->key()).Is(x1));
+ ASSERT(ToRegister(instr->value()).Is(x0));
+
+ Handle<Code> ic = instr->strict_mode() == STRICT
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
+ Register object = ToRegister(instr->object());
+ HObjectAccess access = instr->hydrogen()->access();
+ Handle<Map> transition = instr->transition();
+ int offset = access.offset();
+
+ if (access.IsExternalMemory()) {
+ ASSERT(transition.is_null());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ Register value = ToRegister(instr->value());
+ __ Store(value, MemOperand(object, offset), representation);
+ return;
+ } else if (representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(access.IsInobject());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ FPRegister value = ToDoubleRegister(instr->value());
+ __ Str(value, FieldMemOperand(object, offset));
+ return;
+ }
+
+ Register value = ToRegister(instr->value());
+
+ SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject() &&
+ !instr->hydrogen()->value()->type().IsHeapObject()) {
+ DeoptimizeIfSmi(value, instr->environment());
+
+    // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
+ }
+
+ if (!transition.is_null()) {
+ // Store the new map value.
+ Register new_map_value = ToRegister(instr->temp0());
+ __ Mov(new_map_value, Operand(transition));
+ __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+ if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+ // Update the write barrier for the map field.
+ __ RecordWriteField(object,
+ HeapObject::kMapOffset,
+ new_map_value,
+ ToRegister(instr->temp1()),
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ }
+ }
+
+ // Do the store.
+ Register destination;
+ if (access.IsInobject()) {
+ destination = object;
+ } else {
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ destination = temp0;
+ }
+
+ if (representation.IsSmi() &&
+ instr->hydrogen()->value()->representation().IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register temp0 = ToRegister(instr->temp0());
+ __ Ldr(temp0, FieldMemOperand(destination, offset));
+ __ AssertSmi(temp0);
+ // If destination aliased temp0, restore it to the address calculated
+ // earlier.
+ if (destination.Is(temp0)) {
+ ASSERT(!access.IsInobject());
+ __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ }
+#endif
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+ __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+ Representation::Integer32());
+ } else {
+ __ Store(value, FieldMemOperand(destination, offset), representation);
+ }
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ __ RecordWriteField(destination,
+ offset,
+ value, // Clobbered.
+ ToRegister(instr->temp1()), // Clobbered.
+ GetLinkRegisterState(),
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->value()).is(x0));
+ ASSERT(ToRegister(instr->object()).is(x1));
+
+ // Name must be in x2.
+ __ Mov(x2, Operand(instr->name()));
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ ASSERT(ToRegister(instr->left()).Is(x1));
+ ASSERT(ToRegister(instr->right()).Is(x0));
+ StringAddStub stub(instr->hydrogen()->flags(),
+ instr->hydrogen()->pretenure_flag());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ DeferredStringCharCodeAt* deferred =
+ new(zone()) DeferredStringCharCodeAt(this, instr);
+
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister32(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ Push(index);
+
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+ instr->context());
+ __ AssertSmi(x0);
+ __ SmiUntag(x0);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+ class DeferredStringCharFromCode: public LDeferredCode {
+ public:
+ DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LStringCharFromCode* instr_;
+ };
+
+ DeferredStringCharFromCode* deferred =
+ new(zone()) DeferredStringCharFromCode(this, instr);
+
+ ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+ Register char_code = ToRegister32(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ __ Cmp(char_code, String::kMaxOneByteCharCode);
+ __ B(hi, deferred->entry());
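+  // The code is in range; look it up in the single character string cache and
+  // fall back to the runtime (deferred code) if the cache entry is undefined.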
+ __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+ __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(eq, deferred->entry());
+ __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+ Register char_code = ToRegister(instr->char_code());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Mov(result, 0);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(char_code);
+ __ Push(char_code);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ InlineSmiCheckInfo::EmitNotInlined(masm());
+
+ Condition condition = TokenToCondition(op, false);
+
+ EmitCompareAndBranch(instr, condition, x0, 0);
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister32(instr->result());
+ Register left = ToRegister32(instr->left());
+ Operand right = ToOperand32I(instr->right());
+ if (can_overflow) {
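+    // Subs sets the flags, so a signed overflow (condition vs) triggers a
+    // deoptimization.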
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ Register result = ToRegister(instr->result());
+ Register left = ToRegister(instr->left());
+ Operand right = ToOperand(instr->right());
+ if (can_overflow) {
+ __ Subs(result, left, right);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ Sub(result, left, right);
+ }
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ Register input = ToRegister(value);
+ Register scratch1 = ToRegister(temp1);
+ DoubleRegister dbl_scratch1 = double_scratch();
+
+ Label done;
+
+ // Load heap object map.
+ __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
+
+ if (instr->truncating()) {
+ Register output = ToRegister(instr->result());
+ Label check_bools;
+
+    // If it's not a heap number, jump to the boolean and undefined checks.
+ __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+
+ // A heap number: load value and convert to int32 using truncating function.
+ __ TruncateHeapNumberToI(output, input);
+ __ B(&done);
+
+ __ Bind(&check_bools);
+
+ Register true_root = output;
+ Register false_root = scratch1;
+ __ LoadTrueFalseRoots(true_root, false_root);
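+    // Set output to 1 if the input is the true value and 0 otherwise. The
+    // Ccmp compares the input against false only when it was not true, so
+    // the branch below is taken for either boolean value.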
+ __ Cmp(input, true_root);
+ __ Cset(output, eq);
+ __ Ccmp(input, false_root, ZFlag, ne);
+ __ B(eq, &done);
+
+    // Output already contains zero; undefined is converted to zero for
+    // truncating conversions, so only check that the input is undefined.
+ DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
+ instr->environment());
+ } else {
+ Register output = ToRegister32(instr->result());
+
+ DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+
+    // Deoptimize if it's not a heap number.
+ DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
+ instr->environment());
+
+ // A heap number: load value and convert to int32 using non-truncating
+ // function. If the result is out of range, branch to deoptimize.
+ __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+ __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
+ DeoptimizeIf(ne, instr->environment());
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ Cmp(output, 0);
+ __ B(ne, &done);
+ __ Fmov(scratch1, dbl_scratch1);
+ DeoptimizeIfNegative(scratch1, instr->environment());
+ }
+ }
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2());
+ }
+
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(output, input);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+ __ JumpIfNotSmi(input, deferred->entry());
+ __ SmiUntag(output, input);
+ __ Bind(deferred->exit());
+ }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+ ASSERT(ToRegister(instr->value()).Is(x0));
+ ASSERT(ToRegister(instr->result()).Is(x0));
+ __ Push(x0);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ Label materialized;
+ // Registers will be used as follows:
+ // x7 = literals array.
+ // x1 = regexp literal.
+ // x0 = regexp literal clone.
+ // x10-x12 are used as temporaries.
+ int literal_offset =
+ FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ __ LoadObject(x7, instr->hydrogen()->literals());
+ __ Ldr(x1, FieldMemOperand(x7, literal_offset));
+ __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
+
+  // Create the regexp literal using the runtime function.
+  // The result will be in x0.
+ __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ Mov(x11, Operand(instr->hydrogen()->pattern()));
+ __ Mov(x10, Operand(instr->hydrogen()->flags()));
+ __ Push(x7, x12, x11, x10);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+ __ Mov(x1, x0);
+
+ __ Bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+
+ __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
+ __ B(&allocated);
+
+ __ Bind(&runtime_allocate);
+ __ Mov(x0, Smi::FromInt(size));
+ __ Push(x1, x0);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(x1);
+
+ __ Bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+
+ Handle<Map> from_map = instr->original_map();
+ Handle<Map> to_map = instr->transitioned_map();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
+
+ Label not_applicable;
+ __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map = ToRegister(instr->temp2());
+ __ Mov(new_map, Operand(to_map));
+ __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ // Write barrier.
+ __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
+ GetLinkRegisterState(), kDontSaveFPRegs);
+ } else {
+ ASSERT(ToRegister(instr->context()).is(cp));
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
+ __ Mov(x0, object);
+ __ Mov(x1, Operand(to_map));
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ }
+ __ Bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ Label no_memento_found;
+ __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
+ Deoptimize(instr->environment());
+ __ Bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ TruncateDoubleToI(result, input);
+ if (instr->tag_result()) {
+ __ SmiTag(result, result);
+ }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Register input = ToRegister(instr->value());
+ __ Push(input);
+ CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Handle<String> type_name = instr->type_literal();
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Register value = ToRegister(instr->value());
+
+ if (type_name->Equals(heap()->number_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register map = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, true_label);
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->string_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(
+ value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
+ __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->symbol_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->boolean_string())) {
+ __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+ __ CompareRoot(value, Heap::kFalseValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+ __ CompareRoot(value, Heap::kNullValueRootIndex);
+ EmitBranch(instr, eq);
+
+ } else if (type_name->Equals(heap()->undefined_string())) {
+ ASSERT(instr->temp1() != NULL);
+ Register scratch = ToRegister(instr->temp1());
+
+ __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfSmi(value, false_label);
+ // Check for undetectable objects and jump to the true branch in this case.
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+ } else if (type_name->Equals(heap()->function_string())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
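+    // The two callable types are JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE,
+    // so both are checked here.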
+ ASSERT(instr->temp1() != NULL);
+ Register type = ToRegister(instr->temp1());
+
+ __ JumpIfSmi(value, false_label);
+ __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
+ // HeapObject's type has been loaded into type register by JumpIfObjectType.
+ EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+
+ } else if (type_name->Equals(heap()->object_string())) {
+ ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+ Register map = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp2());
+
+ __ JumpIfSmi(value, false_label);
+ if (!FLAG_harmony_typeof) {
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+ }
+ __ JumpIfObjectType(value, map, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
+ __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ B(gt, false_label);
+ // Check for undetectable objects => false.
+    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+
+ } else {
+ __ B(false_label);
+ }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ Register temp = ToRegister(instr->temp());
+ __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Cmp(map, temp);
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // If the receiver is null or undefined, we have to pass the global object as
+ // a receiver to normal functions. Values have to be passed unchanged to
+ // builtins and strict-mode functions.
+ Label global_object, done, deopt;
+
+ if (!instr->hydrogen()->known_function()) {
+ __ Ldr(result, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+
+ // CompilerHints is an int32 field. See objects.h.
+ __ Ldr(result.W(),
+ FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+ // Do not transform the receiver to object for strict mode functions.
+ __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
+
+ // Do not transform the receiver to object for builtins.
+ __ Tbnz(result, SharedFunctionInfo::kNative, &done);
+ }
+
+ // Normal function. Replace undefined or null with global receiver.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+ __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+ // Deoptimize if the receiver is not a JS object.
+ __ JumpIfSmi(receiver, &deopt);
+ __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ Mov(result, receiver);
+ __ B(ge, &done);
+ // Otherwise, fall through to deopt.
+
+ __ Bind(&deopt);
+ Deoptimize(instr->environment());
+
+ __ Bind(&global_object);
+ __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+
+ __ Bind(&done);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+
+ __ AssertSmi(index);
+
+ Label out_of_object, done;
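+  // A negative index means the property is stored out of object, in the
+  // properties backing store.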
+ __ Cmp(index, Smi::FromInt(0));
+ __ B(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+ __ B(&done);
+
+ __ Bind(&out_of_object);
+ __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+ __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Ldr(result, FieldMemOperand(result,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(&done);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
new file mode 100644
index 000000000..b1d8b70d5
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -0,0 +1,490 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+
+#include "arm64/lithium-arm64.h"
+
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "deoptimizer.h"
+#include "lithium-codegen.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : LCodeGenBase(chunk, assembler, info),
+ deoptimizations_(4, info->zone()),
+ deopt_jump_table_(4, info->zone()),
+ deoptimization_literals_(8, info->zone()),
+ inlined_function_count_(0),
+ scope_(info->scope()),
+ translations_(info->zone()),
+ deferred_(8, info->zone()),
+ osr_pc_offset_(-1),
+ frame_is_built_(false),
+ safepoints_(info->zone()),
+ resolver_(this),
+ expected_safepoint_kind_(Safepoint::kSimple) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
+
+ // Simple accessors.
+ Scope* scope() const { return scope_; }
+
+ int LookupDestination(int block_id) const {
+ return chunk()->LookupDestination(block_id);
+ }
+
+ bool IsNextEmittedBlock(int block_id) const {
+ return LookupDestination(block_id) == GetNextEmittedBlock();
+ }
+
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub() ||
+ info()->requires_frame();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
+ LinkRegisterStatus GetLinkRegisterState() const {
+ return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+ }
+
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode();
+
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code);
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+ Register ToRegister32(LOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ Operand ToOperand32I(LOperand* op);
+ Operand ToOperand32U(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
+ // TODO(jbramley): Examine these helpers and check that they make sense.
+ // IsInteger32Constant returns true for smi constants, for example.
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsSmi(LConstantOperand* op) const;
+
+ int32_t ToInteger32(LConstantOperand* op) const;
+ Smi* ToSmi(LConstantOperand* op) const;
+ double ToDouble(LConstantOperand* op) const;
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ // Return a double scratch register which can be used locally
+ // when generating code for a lithium instruction.
+ DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+ Label* exit,
+ Label* allocation_entry);
+
+ enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+ void DoDeferredNumberTagU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredTaggedToI(LTaggedToI* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2);
+ void DoDeferredAllocate(LAllocate* instr);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+ Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block);
+ void DoGap(LGap* instr);
+
+ // Generic version of EmitBranch. It contains some code to avoid emitting a
+  // branch on the next emitted basic block where we could just fall through.
+  // You shouldn't use it directly; consider one of the helpers such as
+  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+ template<class InstrType>
+ void EmitBranchGeneric(InstrType instr,
+ const BranchGenerator& branch);
+
+ template<class InstrType>
+ void EmitBranch(InstrType instr, Condition condition);
+
+ template<class InstrType>
+ void EmitCompareAndBranch(InstrType instr,
+ Condition condition,
+ const Register& lhs,
+ const Operand& rhs);
+
+ template<class InstrType>
+ void EmitTestAndBranch(InstrType instr,
+ Condition condition,
+ const Register& value,
+ uint64_t mask);
+
+ template<class InstrType>
+ void EmitBranchIfNonZeroNumber(InstrType instr,
+ const FPRegister& value,
+ const FPRegister& scratch);
+
+ template<class InstrType>
+ void EmitBranchIfHeapNumber(InstrType instr,
+ const Register& value);
+
+ template<class InstrType>
+ void EmitBranchIfRoot(InstrType instr,
+ const Register& value,
+ Heap::RootListIndex index);
+
+ // Emits optimized code to deep-copy the contents of statically known object
+ // graphs (e.g. object literal boilerplate). Expects a pointer to the
+ // allocated destination object in the result register, and a pointer to the
+ // source object in the source register.
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ Register scratch,
+ int* offset,
+ AllocationSiteMode mode);
+
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+ SmiCheck check_needed);
+
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ MemOperand BuildSeqStringOperand(Register string,
+ Register temp,
+ LOperand* index,
+ String::Encoding encoding);
+ void DeoptimizeBranch(
+ LEnvironment* environment,
+ BranchType branch_type, Register reg = NoReg, int bit = -1,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void Deoptimize(LEnvironment* environment,
+ Deoptimizer::BailoutType* override_bailout_type = NULL);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+ void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
+ void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
+ void DeoptimizeIfRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfNotRoot(Register rt,
+ Heap::RootListIndex index,
+ LEnvironment* environment);
+ void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
+ void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
+ void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+
+ MemOperand PrepareKeyedExternalArrayOperand(Register key,
+ Register base,
+ Register scratch,
+ bool key_is_smi,
+ bool key_is_constant,
+ int constant_key,
+ ElementsKind elements_kind,
+ int additional_index);
+ void CalcKeyedArrayBaseRegister(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind);
+
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+ Safepoint::DeoptMode mode);
+
+ int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+ void Abort(BailoutReason reason);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
+ LOperand* op,
+ bool is_tagged,
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
+ // Code generation steps. Returns true if code generation should continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ bool GeneratePrologue();
+ bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
+ bool GenerateSafepointTable();
+
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
+ enum SafepointMode {
+ RECORD_SIMPLE_SAFEPOINT,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+ };
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+
+ void CallCodeGeneric(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void CallRuntime(const Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ void LoadContextFromDeferred(LOperand* context);
+ void CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context);
+
+ // Generate a direct call to a known function.
+ // If the function is already loaded into x1 by the caller, function_reg may
+ // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
+ // automatically load it.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int formal_parameter_count,
+ int arity,
+ LInstruction* instr,
+ Register function_reg = NoReg);
+
+ // Support for recording safepoint and position information.
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+ void RecordSafepoint(Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ Safepoint::DeoptMode mode);
+ void RecordSafepointWithLazyDeopt(LInstruction* instr,
+ SafepointMode safepoint_mode);
+
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+ bool frame_is_built_;
+
+ // Builder that keeps track of safepoints in the code. The table itself is
+ // emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ Safepoint::Kind expected_safepoint_kind_;
+
+ int old_position_;
+
+ class PushSafepointRegistersScope BASE_EMBEDDED {
+ public:
+ PushSafepointRegistersScope(LCodeGen* codegen,
+ Safepoint::Kind kind)
+ : codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
+ ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = kind;
+
+ UseScratchRegisterScope temps(codegen_->masm_);
+ // Preserve the value of lr which must be saved on the stack (the call to
+ // the stub will clobber it).
+ Register to_be_pushed_lr =
+ temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+ codegen_->masm_->Mov(to_be_pushed_lr, lr);
+ switch (codegen_->expected_safepoint_kind_) {
+ case Safepoint::kWithRegisters: {
+ StoreRegistersStateStub stub(kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ StoreRegistersStateStub stub(kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ~PushSafepointRegistersScope() {
+ Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+ ASSERT((kind & Safepoint::kWithRegisters) != 0);
+ switch (kind) {
+ case Safepoint::kWithRegisters: {
+ RestoreRegistersStateStub stub(kDontSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ case Safepoint::kWithRegistersAndDoubles: {
+ RestoreRegistersStateStub stub(kSaveFPRegs);
+ codegen_->masm_->CallStub(&stub);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+ }
+
+ private:
+ LCodeGen* codegen_;
+ };
+
+ friend class LDeferredCode;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
+
+ void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
+ int instruction_index_;
+};
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition holds.
+//
+// For actual examples of conditions, see the concrete implementations in
+// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+ explicit BranchGenerator(LCodeGen* codegen)
+ : codegen_(codegen) { }
+
+ virtual ~BranchGenerator() { }
+
+ virtual void Emit(Label* label) const = 0;
+ virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ LCodeGen* codegen_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
new file mode 100644
index 000000000..f0a2e6bd0
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -0,0 +1,334 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "arm64/lithium-gap-resolver-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list and using
+// the root register has two advantages:
+// - It is not in crankshaft allocatable registers list, so it can't interfere
+// with any of the moves we are resolving.
+// - We don't need to push it on the stack, as we can reload it with its value
+// once we have resolved a cycle.
+#define kSavedValue root
+
+// We use the MacroAssembler floating-point scratch register to break a cycle
+// involving double values as the MacroAssembler will not need it for the
+// operations performed by the gap resolver.
+#define kSavedDoubleValue fp_scratch
+
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found when we reach this move again.
+ PerformMove(i);
+ if (in_cycle_) RestoreValue();
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+
+ if (!move.IsEliminated()) {
+ ASSERT(move.source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ if (need_to_restore_root_) {
+ ASSERT(kSavedValue.Is(root));
+ __ InitializeRootRegister();
+ need_to_restore_root_ = false;
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+ LMoveOperands& current_move = moves_[index];
+
+ ASSERT(!current_move.IsPending());
+ ASSERT(!current_move.IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
+ LOperand* destination = current_move.destination();
+ current_move.set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ current_move.set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+
+ // We use registers which are not allocatable by crankshaft to break the cycle
+ // to be sure they don't interfere with the moves we are resolving.
+ ASSERT(!kSavedValue.IsAllocatable());
+ ASSERT(!kSavedDoubleValue.IsAllocatable());
+
+ // We save in a register the source of that move and we remember its
+ // destination. Then we mark this move as resolved so the cycle is
+ // broken and we can perform the other moves.
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+
+ if (source->IsRegister()) {
+ need_to_restore_root_ = true;
+ __ Mov(kSavedValue, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ need_to_restore_root_ = true;
+ __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
+ cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
+ __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+
+ // Mark this move as resolved.
+  // This move will actually be performed by moving the saved value to this
+ // move's destination in LGapResolver::RestoreValue().
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ if (saved_destination_->IsRegister()) {
+ __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
+ cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ Str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(dst, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(dst, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ DoubleRegister result = cgen_->ToDoubleRegister(destination);
+ __ Fmov(result, cgen_->ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
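+ // kSavedValue is used as a scratch below; it aliases the root register,
+ // so flag it for re-initialization once all moves have been resolved.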
+ need_to_restore_root_ = true;
+ if (cgen_->IsSmi(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
+ } else if (cgen_->IsInteger32Constant(constant_source)) {
+ __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+ } else {
+ __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+ }
+ __ Str(kSavedValue, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ Fmov(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand src = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ EmitStackSlotMove(index);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ // The move has been emitted, we can eliminate it.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitStackSlotMove(int index) {
+ // We need a temp register to perform a stack slot to stack slot move, and
+ // the register must not be involved in breaking cycles.
+
+ // Use the Crankshaft double scratch register as the temporary.
+ DoubleRegister temp = crankshaft_fp_scratch;
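+ // The FP scratch simply copies the raw 64-bit slot contents, so this works
+ // for tagged, integer and double slots alike.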
+
+ LOperand* src = moves_[index].source();
+ LOperand* dst = moves_[index].destination();
+
+ ASSERT(src->IsStackSlot());
+ ASSERT(dst->IsStackSlot());
+ __ Ldr(temp, cgen_->ToMemOperand(src));
+ __ Str(temp, cgen_->ToMemOperand(dst));
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
new file mode 100644
index 000000000..d1637b65a
--- /dev/null
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
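+ // For example, the swap {r0 -> r1, r1 -> r0} is a cycle: the value in r1
+ // is saved to a scratch register, r0 -> r1 is emitted, and RestoreValue()
+ // then moves the saved value into r0.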
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Emit a move from one stack slot to another.
+ void EmitStackSlotMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+
+ // We use the root register as a scratch in a few places. When that happens,
+ // this flag is set to indicate that it needs to be restored.
+ bool need_to_restore_root_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
new file mode 100644
index 000000000..d660d3601
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -0,0 +1,1677 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+
+#include <ctype.h>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/instrument-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
+ return UntagSmiMemOperand(object, offset - kHeapObjectTag);
+}
+
+
+MemOperand UntagSmiMemOperand(Register object, int offset) {
+ // Assumes that Smis are shifted left by 32 bits and that the target is
+ // little-endian.
+ STATIC_ASSERT(kSmiShift == 32);
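+ // On a little-endian target the untagged 32-bit value occupies the upper
+ // four bytes of the field, so the address is advanced by
+ // kSmiShift / kBitsPerByte = 4 bytes.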
+ return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
+}
+
+
+Handle<Object> MacroAssembler::CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
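+ // A negative immediate is handled as a ccmn of the negated value, since
+ // the ccmp immediate form only encodes unsigned values.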
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, ADD);
+ }
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, SetFlags, SUB);
+ }
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ Mov(rd, ~imm);
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ ASSERT(allow_macro_instructions_); \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::Adr(const Register& rd, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ adr(rd, label);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Asr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ asrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::B(Label* label) {
+ b(label);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::B(Condition cond, Label* label) {
+ ASSERT(allow_macro_instructions_);
+ B(label, cond);
+}
+
+
+void MacroAssembler::Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfi(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ bfxil(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Bind(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bind(label);
+}
+
+
+void MacroAssembler::Bl(Label* label) {
+ ASSERT(allow_macro_instructions_);
+ bl(label);
+}
+
+
+void MacroAssembler::Blr(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ blr(xn);
+}
+
+
+void MacroAssembler::Br(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ br(xn);
+}
+
+
+void MacroAssembler::Brk(int code) {
+ ASSERT(allow_macro_instructions_);
+ brk(code);
+}
+
+
+void MacroAssembler::Cinc(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinc(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cinv(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cinv(rd, rn, cond);
+}
+
+
+void MacroAssembler::Cls(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ cls(rd, rn);
+}
+
+
+void MacroAssembler::Clz(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ clz(rd, rn);
+}
+
+
+void MacroAssembler::Cneg(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cneg(rd, rn, cond);
+}
+
+
+// Conditionally zero the destination register. Only X registers are supported
+// due to the truncation side-effect when used on W registers.
+void MacroAssembler::CzeroX(const Register& rd,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP() && rd.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ csel(rd, xzr, rd, cond);
+}
+
+
+// Conditionally move a value into the destination register. Only X registers
+// are supported due to the truncation side-effect when used on W registers.
+void MacroAssembler::CmovX(const Register& rd,
+ const Register& rn,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsSP());
+ ASSERT(rd.Is64Bits() && rn.Is64Bits());
+ ASSERT((cond != al) && (cond != nv));
+ if (!rd.is(rn)) {
+ csel(rd, rn, rd, cond);
+ }
+}
+
+
+void MacroAssembler::Cset(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ cset(rd, cond);
+}
+
+
+void MacroAssembler::Csetm(const Register& rd, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csetm(rd, cond);
+}
+
+
+void MacroAssembler::Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rm, cond);
+}
+
+
+void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dmb(domain, type);
+}
+
+
+void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
+ ASSERT(allow_macro_instructions_);
+ dsb(domain, type);
+}
+
+
+void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
+ ASSERT(allow_macro_instructions_);
+ debug(message, code, params);
+}
+
+
+void MacroAssembler::Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ extr(rd, rn, rm, lsb);
+}
+
+
+void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fabs(fd, fn);
+}
+
+
+void MacroAssembler::Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fadd(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fccmp(fn, fm, nzcv, cond);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fcmp(fn, fm);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+ fcsel(fd, fn, fm, cond);
+}
+
+
+void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fcvt(fd, fn);
+}
+
+
+void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtas(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtau(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtms(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtmu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtns(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtnu(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzs(rd, fn);
+}
+
+
+void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fcvtzu(rd, fn);
+}
+
+
+void MacroAssembler::Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fdiv(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmax(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmaxnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmin(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fminnm(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ // Only emit an instruction if fd and fn are different, and they are both D
+ // registers. fmov(s0, s0) is not a no-op because it clears the top word of
+ // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
+ // top of q0, but FPRegister does not currently support Q registers.
+ if (!fd.Is(fn) || !fd.Is64Bits()) {
+ fmov(fd, fn);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, Register rn) {
+ ASSERT(allow_macro_instructions_);
+ fmov(fd, rn);
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is32Bits()) {
+ Fmov(fd, static_cast<float>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is64Bits());
+ if (IsImmFP64(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
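+ // +0.0 is the only zero whose raw bits are all clear, so it can be moved
+ // directly from xzr; -0.0 falls through to the generic path below.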
+ fmov(fd, xzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
+ Mov(tmp, double_to_rawbits(imm));
+ Fmov(fd, tmp);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, float imm) {
+ ASSERT(allow_macro_instructions_);
+ if (fd.Is64Bits()) {
+ Fmov(fd, static_cast<double>(imm));
+ return;
+ }
+
+ ASSERT(fd.Is32Bits());
+ if (IsImmFP32(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, wzr);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireW();
+ // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
+ Mov(tmp, float_to_rawbits(imm));
+ Fmov(fd, tmp);
+ }
+}
+
+
+void MacroAssembler::Fmov(Register rd, FPRegister fn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ fmov(rd, fn);
+}
+
+
+void MacroAssembler::Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fmul(fd, fn, fm);
+}
+
+
+void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fneg(fd, fn);
+}
+
+
+void MacroAssembler::Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmadd(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa) {
+ ASSERT(allow_macro_instructions_);
+ fnmsub(fd, fn, fm, fa);
+}
+
+
+void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frinta(fd, fn);
+}
+
+
+void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintn(fd, fn);
+}
+
+
+void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ frintz(fd, fn);
+}
+
+
+void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
+ ASSERT(allow_macro_instructions_);
+ fsqrt(fd, fn);
+}
+
+
+void MacroAssembler::Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm) {
+ ASSERT(allow_macro_instructions_);
+ fsub(fd, fn, fm);
+}
+
+
+void MacroAssembler::Hint(SystemHint code) {
+ ASSERT(allow_macro_instructions_);
+ hint(code);
+}
+
+
+void MacroAssembler::Hlt(int code) {
+ ASSERT(allow_macro_instructions_);
+ hlt(code);
+}
+
+
+void MacroAssembler::Isb() {
+ ASSERT(allow_macro_instructions_);
+ isb();
+}
+
+
+void MacroAssembler::Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldnp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!AreAliased(rt, rt2));
+ ldp(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ASSERT(!rt2.IsZero());
+ ldpsw(rt, rt2, src);
+}
+
+
+void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+ ASSERT(allow_macro_instructions_);
+ ldr(ft, imm);
+}
+
+
+void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ ldr(rt, imm);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsl(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsl(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lslv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsr(rd, rn, shift);
+}
+
+
+void MacroAssembler::Lsr(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ lsrv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ madd(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mneg(rd, rn, rm);
+}
+
+
+void MacroAssembler::Mov(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ // Emit a register move only if the registers are distinct, or if they are
+ // not X registers. Note that mov(w0, w0) is not a no-op because it clears
+ // the top word of x0.
+ if (!rd.Is(rn) || !rd.Is64Bits()) {
+ Assembler::mov(rd, rn);
+ }
+}
+
+
+void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ movk(rd, imm, shift);
+}
+
+
+void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ mrs(rt, sysreg);
+}
+
+
+void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rt.IsZero());
+ msr(sysreg, rt);
+}
+
+
+void MacroAssembler::Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ msub(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ mul(rd, rn, rm);
+}
+
+
+void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rbit(rd, rn);
+}
+
+
+void MacroAssembler::Ret(const Register& xn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!xn.IsZero());
+ ret(xn);
+ CheckVeneerPool(false, false);
+}
+
+
+void MacroAssembler::Rev(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev(rd, rn);
+}
+
+
+void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev16(rd, rn);
+}
+
+
+void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rev32(rd, rn);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rs,
+ unsigned shift) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ror(rd, rs, shift);
+}
+
+
+void MacroAssembler::Ror(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ rorv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sbfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ scvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sdiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smull(rd, rn, rm);
+}
+
+
+void MacroAssembler::Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ smulh(rd, rn, rm);
+}
+
+
+void MacroAssembler::Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stnp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ ASSERT(allow_macro_instructions_);
+ stp(rt, rt2, dst);
+}
+
+
+void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtb(rd, rn);
+}
+
+
+void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxth(rd, rn);
+}
+
+
+void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ sxtw(rd, rn);
+}
+
+
+void MacroAssembler::Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfiz(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ubfx(rd, rn, lsb, width);
+}
+
+
+void MacroAssembler::Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits) {
+ ASSERT(allow_macro_instructions_);
+ ucvtf(fd, rn, fbits);
+}
+
+
+void MacroAssembler::Udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ udiv(rd, rn, rm);
+}
+
+
+void MacroAssembler::Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umaddl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ umsubl(rd, rn, rm, ra);
+}
+
+
+void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtb(rd, rn);
+}
+
+
+void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxth(rd, rn);
+}
+
+
+void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ uxtw(rd, rn);
+}
+
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ ASSERT(!csp.Is(sp_));
+ // TODO(jbramley): Several callers rely on this not using scratch registers,
+ // so we use the assembler directly here. However, this means that large
+ // immediate values of 'space' cannot be handled cleanly. (Only 24-bit
+ // immediates or values of 'space' that can be encoded in one instruction are
+ // accepted.) Once we implement our flexible scratch register idea, we could
+ // greatly simplify this function.
+ InstructionAccurateScope scope(this);
+ if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
+ // The subtract instruction supports a 12-bit immediate, shifted left by
+ // zero or 12 bits. So, in two instructions, we can subtract any immediate
+ // between zero and (1 << 24) - 1.
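+ // For example, with space = 0x12345 this emits "sub csp, jssp, #0x12000"
+ // followed by "sub csp, csp, #0x345" (assuming StackPointer() is jssp).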
+ int64_t imm = space.immediate();
+ ASSERT(is_uint24(imm));
+
+ int64_t imm_top_12_bits = imm >> 12;
+ sub(csp, StackPointer(), imm_top_12_bits << 12);
+ imm -= imm_top_12_bits << 12;
+ if (imm > 0) {
+ sub(csp, csp, imm);
+ }
+ } else {
+ sub(csp, StackPointer(), space);
+ }
+}
+
+
+void MacroAssembler::InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ Mov(root, Operand(roots_array_start));
+}
+
+
+void MacroAssembler::SmiTag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ Lsl(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+
+
+void MacroAssembler::SmiUntag(Register dst, Register src) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(src);
+ }
+ Asr(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+
+
+void MacroAssembler::SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is64Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
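+ // Converting with kSmiShift fractional bits folds the untag into the
+ // conversion: (x << kSmiShift) * 2^-kSmiShift == x.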
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode) {
+ ASSERT(dst.Is32Bits() && src.Is64Bits());
+ if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+ AssertSmi(src);
+ }
+ Scvtf(dst, src, kSmiShift);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ // Check if the tag bit is set.
+ if (smi_label) {
+ Tbz(value, 0, smi_label);
+ if (not_smi_label) {
+ B(not_smi_label);
+ }
+ } else {
+ ASSERT(not_smi_label);
+ Tbnz(value, 0, not_smi_label);
+ }
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ JumpIfSmi(value, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if both tag bits are clear.
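+ // The ORed value has its tag bit set iff at least one input is not a smi,
+ // so JumpIfSmi on it takes both_smi_label only when both inputs are smis.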
+ Orr(tmp, value1, value2);
+ JumpIfSmi(tmp, both_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label) {
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ // Check if either tag bit is clear.
+ And(tmp, value1, value2);
+ JumpIfSmi(tmp, either_smi_label, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfBothSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label) {
+ JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+ Register type,
+ Label* fail) {
+ CompareObjectType(object, type, type, LAST_NAME_TYPE);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // If the cmp result is lt, the following ccmp will clear all flags.
+ // With Z == 0 and N == V, the gt condition then holds.
+ Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
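+ // After the ccmp, the gt condition holds iff the instance type is outside
+ // the non-callable spec object range.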
+
+ // If no fail label was provided, just fall through and leave the flags
+ // updated.
+ if (fail != NULL) {
+ B(gt, fail);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register type,
+ Label* not_string,
+ Label* string) {
+ Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
+
+ STATIC_ASSERT(kStringTag == 0);
+ ASSERT((string != NULL) || (not_string != NULL));
+ if (string == NULL) {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ } else if (not_string == NULL) {
+ TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
+ } else {
+ TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
+ B(string);
+ }
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mov(tmp, Operand(handle));
+ Push(tmp);
+}
+
+
+void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ if (csp.Is(StackPointer())) {
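+ // When csp is the stack pointer it must stay 16-byte aligned, so only
+ // multiples of 16 bytes may be claimed.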
+ ASSERT(size % 16 == 0);
+ } else {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (!csp.Is(StackPointer())) {
+ BumpSystemStackPointer(size);
+ }
+
+ Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+ uint64_t size = count * unit_size;
+
+ if (size == 0) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (csp.Is(StackPointer())) {
+ ASSERT(size % 16 == 0);
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+
+ if (unit_size == 0) {
+ return;
+ }
+
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
+ const Operand size(count, LSL, shift);
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
+ ASSERT(IsPowerOf2(unit_size));
+ const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
+ const Operand size(count_smi,
+ (shift >= 0) ? (LSL) : (LSR),
+ (shift >= 0) ? (shift) : (-shift));
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ Add(StackPointer(), StackPointer(), size);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label) {
+ if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ ((cond == eq) || (cond == ne))) {
+ if (cond == eq) {
+ Cbz(lhs, label);
+ } else {
+ Cbnz(lhs, label);
+ }
+ } else {
+ Cmp(lhs, rhs);
+ B(cond, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbnz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(ne, label);
+ }
+}
+
+
+void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label) {
+ int bits = reg.SizeInBits();
+ ASSERT(CountSetBits(bit_pattern, bits) > 0);
+ if (CountSetBits(bit_pattern, bits) == 1) {
+ Tbz(reg, MaskToBit(bit_pattern), label);
+ } else {
+ Tst(reg, bit_pattern);
+ B(eq, label);
+ }
+}
+
+
+void MacroAssembler::InlineData(uint64_t data) {
+ ASSERT(is_uint16(data));
+ InstructionAccurateScope scope(this, 1);
+ movz(xzr, data);
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
new file mode 100644
index 000000000..08ddb8782
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -0,0 +1,5184 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "bootstrapper.h"
+#include "codegen.h"
+#include "cpu-profiler.h"
+#include "debug.h"
+#include "isolate-inl.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
+#define __
+
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate,
+ byte * buffer,
+ unsigned buffer_size)
+ : Assembler(arg_isolate, buffer, buffer_size),
+ generating_stub_(false),
+#if DEBUG
+ allow_macro_instructions_(true),
+#endif
+ has_frame_(false),
+ use_real_aborts_(true),
+ sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation()) {
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ Logical(rd, rn, temp, op);
+
+ } else if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.SizeInBits();
+ ASSERT(rd.Is64Bits() || is_uint32(immediate));
+
+ // If the operation is a NOT variant, strip the NOT and invert the
+ // immediate instead.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ if (rd.Is32Bits()) {
+ immediate &= kWRegMask;
+ }
+ }
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR: // Fall through.
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1L)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS: // Fall through.
+ case BICS:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, immediate);
+ if (rd.Is(csp)) {
+ // If rd is the stack pointer, we cannot use it as the destination
+ // register, so we use the temp register as an intermediate again.
+ Logical(temp, rn, temp, op);
+ Mov(csp, temp);
+ } else {
+ Logical(rd, rn, temp, op);
+ }
+ }
+
+ } else if (operand.IsExtendedRegister()) {
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, temp, op);
+
+ } else {
+ // The operand can be encoded in the instruction.
+ ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ ASSERT(!rd.IsZero());
+
+ // TODO(all) extend to support more immediates.
+ //
+ // Immediates on AArch64 can be produced using an initial value and zero to
+ // three move-keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half-words.
+ //
+ // The code below supports all five initial value generators, and
+ // applying move-keep operations to move-zero and move-inverted initial
+ // values.
+
+ unsigned reg_size = rd.SizeInBits();
+ unsigned n, imm_s, imm_r;
+ if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't
+ // write to the stack pointer.
+ movz(rd, imm);
+ } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
+ // Immediate can be represented in a move inverted instruction. Movn can't
+ // write to the stack pointer.
+ movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
+ } else {
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
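+ // For example, 0x0000cafe00001234 is built with "movz xd, #0x1234"
+ // followed by "movk xd, #0xcafe, lsl #32", xd being the destination.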
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffffL;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move immediate values into the stack pointer, so
+ // set up a temporary register, if needed.
+ UseScratchRegisterScope temps(this);
+ Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ movn(temp, (~imm16) & 0xffffL, 16 * i);
+ } else {
+ movz(temp, imm16, 16 * i);
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ movk(temp, imm16, 16 * i);
+ }
+ }
+ }
+ ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ mov(rd, temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+
+ // Provide a swap register for instructions that need to write into the
+ // system stack pointer (and can't do this inherently).
+ UseScratchRegisterScope temps(this);
+ Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(dst, operand);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(dst, operand.immediate());
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(dst, operand.reg(), operand.extend(),
+ operand.shift_amount());
+
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+ // If csp is an operand, add #0 is emitted, otherwise, orr #0.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ Assembler::mov(rd, operand.reg());
+ }
+ // This case can handle writes into the system stack pointer directly.
+ dst = rd;
+ }
+
+ // Copy the result to the system stack pointer.
+ if (!dst.Is(rd)) {
+ ASSERT(rd.IsSP());
+ Assembler::mov(rd, dst);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ ASSERT(allow_macro_instructions_);
+
+ if (operand.NeedsRelocation()) {
+ LoadRelocated(rd, operand);
+ mvn(rd, rd);
+
+ } else if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, ~operand.immediate());
+
+ } else if (operand.IsExtendedRegister()) {
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, rd);
+
+ } else {
+ mvn(rd, operand);
+ }
+}
+
+
+unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ ConditionalCompareMacro(rn, temp, nzcv, cond, op);
+
+ } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+
+ } else {
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+ ASSERT((cond != al) && (cond != nv));
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
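+    // With the zero register as the false operand, csel yields 0, csinc
+    // yields zr + 1 == 1 and csinv yields ~zr == -1 when the condition is
+    // false, so no scratch register is needed for these immediates.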
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if (operand.NeedsRelocation()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ AddSubMacro(rd, rn, temp, S, op);
+ } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
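+    // The operand cannot be encoded directly: the immediate is out of range
+    // for add/sub (immediate), rn is the zero register (which the immediate
+    // and extended-register forms would encode as the stack pointer), or the
+    // shift is ROR, which add/sub (shifted register) does not support.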
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ UseScratchRegisterScope temps(this);
+
+ if (operand.NeedsRelocation()) {
+ Register temp = temps.AcquireX();
+ LoadRelocated(temp, operand);
+ AddSubWithCarryMacro(rd, rn, temp, S, op);
+
+ } else if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+    // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = temps.AcquireSameSizeAs(rn);
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+ ASSERT(operand.shift() != ROR);
+ ASSERT(is_uintn(operand.shift_amount(),
+ rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
+ : kWRegSizeInBitsLog2));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ ASSERT(operand.shift_amount() <= 4);
+ ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ Register temp = temps.AcquireSameSizeAs(rn);
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, temp, S, op);
+
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ add(addr.base(), addr.base(), offset);
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ add(addr.base(), addr.base(), offset);
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
+
+
+void MacroAssembler::Load(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8()) {
+ Ldrsb(rt, addr);
+ } else if (r.IsUInteger8()) {
+ Ldrb(rt, addr);
+ } else if (r.IsInteger16()) {
+ Ldrsh(rt, addr);
+ } else if (r.IsUInteger16()) {
+ Ldrh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Ldr(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Ldr(rt, addr);
+ }
+}
+
+
+void MacroAssembler::Store(const Register& rt,
+ const MemOperand& addr,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ Strb(rt, addr);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ Strh(rt, addr);
+ } else if (r.IsInteger32()) {
+ Str(rt.W(), addr);
+ } else {
+ ASSERT(rt.Is64Bits());
+ Str(rt, addr);
+ }
+}
+
+
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label *label, ImmBranchType b_type) {
+ bool need_longer_range = false;
+ // There are two situations in which we care about the offset being out of
+ // range:
+ // - The label is bound but too far away.
+ // - The label is not bound but linked, and the previous branch
+ // instruction in the chain is too far away.
+ if (label->is_bound() || label->is_linked()) {
+ need_longer_range =
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ }
+ if (!need_longer_range && !label->is_bound()) {
+ int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
+ unresolved_branches_.insert(
+ std::pair<int, FarBranchInfo>(max_reachable_pc,
+ FarBranchInfo(pc_offset(), label)));
+ // Also maintain the next pool check.
+ next_veneer_pool_check_ =
+ Min(next_veneer_pool_check_,
+ max_reachable_pc - kVeneerDistanceCheckMargin);
+ }
+ return need_longer_range;
+}
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ (bit == -1 || type >= kBranchTypeFirstUsingBit));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+
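+  // A conditional branch can only reach +/- 1MB, whereas an unconditional B
+  // can reach +/- 128MB, so if the target may be out of range, branch over an
+  // unconditional branch using the inverted condition.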
+ if (need_extra_instructions) {
+ b(&done, InvertCondition(cond));
+ B(label);
+ } else {
+ b(label, cond);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbnz(rt, bit_pos, &done);
+ B(label);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbz(rt, &done);
+ B(label);
+ } else {
+ cbnz(rt, label);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbnz(rt, &done);
+ B(label);
+ } else {
+ cbz(rt, label);
+ }
+ bind(&done);
+}
+
+
+// Pseudo-instructions.
+
+
+void MacroAssembler::Abs(const Register& rd, const Register& rm,
+ Label* is_not_representable,
+ Label* is_representable) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(AreSameSizeAndType(rd, rm));
+
+ Cmp(rm, 1);
+ Cneg(rd, rm, lt);
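+  // Cneg negates rm when the condition holds; lt is true here for rm < 1,
+  // i.e. for zero and negative inputs, so rd receives the absolute value.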
+
+ // If the comparison sets the v flag, the input was the smallest value
+ // representable by rm, and the mathematical result of abs(rm) is not
+ // representable using two's complement.
+ if ((is_not_representable != NULL) && (is_representable != NULL)) {
+ B(is_not_representable, vs);
+ B(is_representable);
+ } else if (is_not_representable != NULL) {
+ B(is_not_representable, vs);
+ } else if (is_representable != NULL) {
+ B(is_representable, vc);
+ }
+}
+
+
+// Abstracted stack operations.
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5,
+ const CPURegister& src6, const CPURegister& src7) {
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+
+  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
+ int size = src0.SizeInBytes();
+
+ PrepareForPush(count, size);
+ PushHelper(4, size, src0, src1, src2, src3);
+ PushHelper(count - 4, size, src4, src5, src6, src7);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushPopQueue::PushQueued() {
+ if (queued_.empty()) return;
+
+ masm_->PrepareForPush(size_);
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PushHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ queued_.clear();
+}
+
+
+void MacroAssembler::PushPopQueue::PopQueued() {
+ if (queued_.empty()) return;
+
+ masm_->PrepareForPop(size_);
+
+ int count = queued_.size();
+ int index = 0;
+ while (index < count) {
+ // PopHelper can only handle registers with the same size and type, and it
+ // can handle only four at a time. Batch them up accordingly.
+ CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
+ int batch_index = 0;
+ do {
+ batch[batch_index++] = queued_[index++];
+ } while ((batch_index < 4) && (index < count) &&
+ batch[0].IsSameSizeAndType(queued_[index]));
+
+ masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
+ batch[0], batch[1], batch[2], batch[3]);
+ }
+
+ queued_.clear();
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPush(registers.Count(), size);
+ // Push up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in order
+ // to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+
+ PrepareForPop(registers.Count(), size);
+ // Pop up to four registers at a time because if the current stack pointer is
+ // csp and reg_size is 32, registers must be pushed in blocks of four in
+ // order to maintain the 16-byte alignment for csp.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+
+ if (!csp.Is(StackPointer()) && emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ Mov(csp, StackPointer());
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+
+ if (FLAG_optimize_for_size && count > 8) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Label loop;
+ __ Mov(temp, count / 2);
+ __ Bind(&loop);
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ __ Subs(temp, temp, 1);
+ __ B(ne, &loop);
+
+ count %= 2;
+ }
+
+ // Push up to four registers at a time if possible because if the current
+ // stack pointer is csp and the register size is 32, registers must be pushed
+ // in blocks of four in order to maintain the 16-byte alignment for csp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
+ PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(count);
+
+ if (FLAG_optimize_for_size) {
+ Label loop, done;
+
+ Subs(temp, count, 1);
+ B(mi, &done);
+
+ // Push all registers individually, to save code size.
+ Bind(&loop);
+ Subs(temp, temp, 1);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+ B(pl, &loop);
+
+ Bind(&done);
+ } else {
+ Label loop, leftover2, leftover1, done;
+
+ Subs(temp, count, 4);
+ B(mi, &leftover2);
+
+ // Push groups of four first.
+ Bind(&loop);
+ Subs(temp, temp, 4);
+ PushHelper(4, src.SizeInBytes(), src, src, src, src);
+ B(pl, &loop);
+
+ // Push groups of two.
+ Bind(&leftover2);
+ Tbz(count, 1, &leftover1);
+ PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
+
+ // Push the last one (if required).
+ Bind(&leftover1);
+ Tbz(count, 0, &done);
+ PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
+
+ Bind(&done);
+ }
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+  // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ ASSERT(size == src0.SizeInBytes());
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
+ break;
+ case 2:
+ ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
+ break;
+ case 3:
+ ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
+ str(src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using csp, whilst maintaining 16-byte alignment for csp
+ // at all times.
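+      // The resulting layout, from the new stack pointer upwards, will be
+      // src3, src2, src1, src0, matching the order that four individual
+      // pushes would produce.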
+ stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(StackPointer(), 2 * size));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+  // Ensure that we don't unintentionally modify scratch or debug registers.
+ InstructionAccurateScope scope(this);
+
+ ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
+ break;
+ case 2:
+ ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
+ break;
+ case 3:
+ ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and
+ // skip the whole block in the second instruction. This allows four W
+ // registers to be popped using csp, whilst maintaining 16-byte alignment
+ // for csp at all times.
+ ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
+ ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(Operand total_size) {
+ // TODO(jbramley): This assertion generates too much code in some debug tests.
+ // AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.immediate() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(total_size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(Operand total_size) {
+ AssertStackConsistency();
+ if (csp.Is(StackPointer())) {
+ // If the current stack pointer is csp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ if (total_size.IsImmediate()) {
+ ASSERT((total_size.immediate() % 16) == 0);
+ }
+
+ // Don't check access size for non-immediate sizes. It's difficult to do
+ // well, and it will be caught by hardware (or the simulator) anyway.
+ }
+}
+
+
+void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ ASSERT(offset.immediate() >= 0);
+ } else if (emit_debug_code()) {
+ Cmp(xzr, offset);
+ Check(le, kStackAccessBelowStackPointer);
+ }
+
+ Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PokePair(const CPURegister& src1,
+ const CPURegister& src2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(src1, src2));
+ ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ Stp(src1, src2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PeekPair(const CPURegister& dst1,
+ const CPURegister& dst2,
+ int offset) {
+ ASSERT(AreSameSizeAndType(dst1, dst2));
+ ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos); // x28 = jssp
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ InstructionAccurateScope scope(this);
+
+ // This method must not be called unless the current stack pointer is the
+ // system stack pointer (csp).
+ ASSERT(csp.Is(StackPointer()));
+
+ MemOperand tos(csp, 2 * kXRegSize, PostIndex);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos); // x28 = jssp
+ ldp(x29, x30, tos);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+}
+
+
+void MacroAssembler::AssertStackConsistency() {
+ if (emit_debug_code()) {
+ if (csp.Is(StackPointer())) {
+ // We can't check the alignment of csp without using a scratch register
+ // (or clobbering the flags), but the processor (or simulator) will abort
+ // if it is not properly aligned during a load.
+ ldr(xzr, MemOperand(csp, 0));
+ } else if (FLAG_enable_slow_asserts) {
+ Label ok;
+ // Check that csp <= StackPointer(), preserving all registers and NZCV.
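+      // The subtraction is done in StackPointer() itself so that no other
+      // register or flag is clobbered; the same subtraction below restores
+      // it, because csp - (csp - sp) == sp.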
+ sub(StackPointer(), csp, StackPointer());
+ cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
+ tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
+
+ Abort(kTheCurrentStackPointerIsBelowCsp);
+
+ bind(&ok);
+ // Restore StackPointer().
+ sub(StackPointer(), csp, StackPointer());
+ }
+ }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ // TODO(jbramley): Most root values are constants, and can be synthesized
+ // without a load. Refer to the ARM back end for details.
+ Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ Str(source, MemOperand(root, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadTrueFalseRoots(Register true_root,
+ Register false_root) {
+ STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
+ Ldp(true_root, false_root,
+ MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Mov(result, Operand(cell));
+ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ } else {
+ Mov(result, Operand(object));
+ }
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Map::EnumLengthBits::kMask);
+}
+
+
+void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
+}
+
+
+void MacroAssembler::CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime) {
+ ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ scratch3));
+
+ Register empty_fixed_array_value = scratch0;
+ Register current_object = scratch1;
+
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Label next, start;
+
+ Mov(current_object, object);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ Register map = scratch2;
+ Register enum_length = scratch3;
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ EnumLengthUntagged(enum_length, map);
+ Cmp(enum_length, kInvalidEnumCacheSentinel);
+ B(eq, call_runtime);
+
+ B(&start);
+
+ Bind(&next);
+ Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
+
+ // For all objects but the receiver, check that the cache is empty.
+ EnumLengthUntagged(enum_length, map);
+ Cbnz(enum_length, call_runtime);
+
+ Bind(&start);
+
+ // Check that there are no elements. Register current_object contains the
+ // current JS object we've reached through the prototype chain.
+ Label no_elements;
+ Ldr(current_object, FieldMemOperand(current_object,
+ JSObject::kElementsOffset));
+ Cmp(current_object, empty_fixed_array_value);
+ B(eq, &no_elements);
+
+ // Second chance, the object may be using the empty slow element dictionary.
+ CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
+ B(ne, call_runtime);
+
+ Bind(&no_elements);
+ Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
+ Cmp(current_object, null_value);
+ B(ne, &next);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ Add(scratch1, receiver,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
+ Cmp(scratch1, new_space_start);
+ B(lt, no_memento_found);
+
+ Mov(scratch2, new_space_allocation_top);
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
+ B(gt, no_memento_found);
+
+ Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
+ Cmp(scratch1,
+ Operand(isolate()->factory()->allocation_memento_map()));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2) {
+ // Handler expects argument in x0.
+ ASSERT(exception.Is(x0));
+
+ // Compute the handler entry address and jump to it. The handler table is
+ // a fixed array of (smi-tagged) code offsets.
+ Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
+ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
+ Lsr(scratch2, state, StackHandler::kKindWidth);
+ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
+ Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
+ Br(scratch1);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Condition cond,
+ Label* branch) {
+ ASSERT(cond == eq || cond == ne);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ And(temp, object, ExternalReference::new_space_mask(isolate()));
+ Cmp(temp, ExternalReference::new_space_start(isolate()));
+ B(cond, branch);
+}
+
+
+void MacroAssembler::Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+ // Restore the next handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Restore the context and frame pointer.
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ // If the handler is a JS frame, restore the context to the frame.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
+ Label not_js_frame;
+ Cbz(cp, &not_js_frame);
+ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ Bind(&not_js_frame);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The handler expects the exception in x0.
+ ASSERT(value.Is(x0));
+
+ // Drop the stack pointer to the top of the top stack handler.
+ ASSERT(jssp.Is(StackPointer()));
+ Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
+ isolate())));
+ Ldr(jssp, MemOperand(scratch1));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label fetch_next, check_kind;
+ B(&check_kind);
+ Bind(&fetch_next);
+ Peek(jssp, StackHandlerConstants::kNextOffset);
+
+ Bind(&check_kind);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+ Peek(scratch2, StackHandlerConstants::kStateOffset);
+ TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ Pop(scratch2);
+ Str(scratch2, MemOperand(scratch1));
+
+ // Get the code object and state. Clear the context and frame pointer (0 was
+ // saved in the handler).
+ Register object = scratch1;
+ Register state = scratch2;
+ Pop(object, state, cp, fp);
+
+ JumpToHandlerEntry(value, object, state, scratch3, scratch4);
+}
+
+
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ Bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ RecordComment("Throw message: ");
+ RecordComment((msg != NULL) ? msg : "UNKNOWN");
+#endif
+
+ Mov(x0, Smi::FromInt(reason));
+ Push(x0);
+
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
+ }
+ // ThrowMessage should not return here.
+ Unreachable();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label ok;
+ B(InvertCondition(cc), &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
+ Label ok;
+ JumpIfNotSmi(value, &ok);
+ Throw(reason);
+ Bind(&ok);
+}
+
+
+void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
+ ASSERT(smi.Is64Bits());
+ Abs(smi, smi, slow);
+}
+
+
+void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, reason);
+ }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAName);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, LAST_NAME_TYPE);
+ Check(ls, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
+ Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ Bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsASmiAndNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All arguments must be on the stack before this function is called.
+ // x0 holds the return value after the call.
+
+ // Check that the number of arguments matches what the function expects.
+ // If f->nargs is -1, the function can accept a variable number of arguments.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ // Illegal operation: drop the stack arguments and return undefined.
+ if (num_arguments > 0) {
+ Drop(num_arguments);
+ }
+ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ return;
+ }
+
+ // Place the necessary arguments.
+ Mov(x0, num_arguments);
+ Mov(x1, ExternalReference(f, isolate()));
+
+ CEntryStub stub(1, save_doubles);
+ CallStub(&stub);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
+ ASM_LOCATION("CallApiFunctionAndReturn");
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate());
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate()),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate()),
+ next_address);
+
+ ASSERT(function_address.is(x1) || function_address.is(x2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
+ STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
+ Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+ Ldrb(w10, MemOperand(x10));
+ Cbz(w10, &profiler_disabled);
+ Mov(x3, thunk_ref);
+ B(&end_profiler_check);
+
+ Bind(&profiler_disabled);
+ Mov(x3, function_address);
+ Bind(&end_profiler_check);
+
+ // Save the callee-save registers we are going to use.
+ // TODO(all): Is this necessary? ARM doesn't do it.
+ STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
+ Poke(x19, (spill_offset + 0) * kXRegSize);
+ Poke(x20, (spill_offset + 1) * kXRegSize);
+ Poke(x21, (spill_offset + 2) * kXRegSize);
+ Poke(x22, (spill_offset + 3) * kXRegSize);
+
+ // Allocate HandleScope in callee-save registers.
+  // We will need to restore the HandleScope after the call to the API
+  // function; by allocating it in callee-saved registers it will be
+  // preserved by C code.
+ Register handle_scope_base = x22;
+ Register next_address_reg = x19;
+ Register limit_reg = x20;
+ Register level_reg = w21;
+
+ Mov(handle_scope_base, next_address);
+ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Add(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+  // The native call returns to the DirectCEntry stub, which redirects to the
+  // return address pushed on the stack (it could have moved after GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, x3);
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ Label promote_scheduled_exception;
+ Label exception_handled;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ Ldr(x0, return_value_operand);
+ Bind(&return_value_loaded);
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
+ if (emit_debug_code()) {
+ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
+ Cmp(w1, level_reg);
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+ }
+ Sub(level_reg, level_reg, 1);
+ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
+ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
+ Cmp(limit_reg, x1);
+ B(ne, &delete_allocated_handles);
+
+ Bind(&leave_exit_frame);
+ // Restore callee-saved registers.
+ Peek(x19, (spill_offset + 0) * kXRegSize);
+ Peek(x20, (spill_offset + 1) * kXRegSize);
+ Peek(x21, (spill_offset + 2) * kXRegSize);
+ Peek(x22, (spill_offset + 3) * kXRegSize);
+
+ // Check if the function scheduled an exception.
+ Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
+ Ldr(x5, MemOperand(x5));
+ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
+ Bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ Ldr(cp, *context_restore_operand);
+ }
+
+ LeaveExitFrame(false, x1, !restore_context);
+ Drop(stack_space);
+ Ret();
+
+ Bind(&promote_scheduled_exception);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(
+ Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ }
+ B(&exception_handled);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ Bind(&delete_allocated_handles);
+ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
+ // Save the return value in a callee-save register.
+ Register saved_result = x19;
+ Mov(saved_result, x0);
+ Mov(x0, ExternalReference::isolate_address(isolate()));
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 1);
+ Mov(x0, saved_result);
+ B(&leave_exit_frame);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ Mov(x0, num_arguments);
+ Mov(x1, ext);
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+ Mov(x1, builtin);
+ CEntryStub stub(1);
+ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ Ldr(target, GlobalObjectMemOperand());
+ Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ Ldr(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id) {
+ ASSERT(!AreAliased(target, function));
+ GetBuiltinFunction(function, id);
+ // Load the code entry point from the builtins object.
+ Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ ASM_LOCATION("MacroAssembler::InvokeBuiltin");
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Get the builtin entry in x2 and setup the function object in x1.
+ GetBuiltinEntry(x2, x1, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(x2));
+ Call(x2);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(x2);
+ }
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, num_arguments);
+ JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size) {
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ LoadRoot(scratch2, map_index);
+ SmiTag(scratch1, length);
+ Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+
+ Mov(scratch2, String::kEmptyHashField);
+ Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_ARM64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one ARM
+ // platform for another ARM platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_ARM64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_ARM64
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args) {
+ CallCFunction(function, num_of_reg_args, 0);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, function);
+ CallCFunction(temp, num_of_reg_args, num_of_double_args);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_of_reg_args,
+ int num_of_double_args) {
+ ASSERT(has_frame());
+ // We can pass 8 integer arguments in registers. If we need to pass more than
+ // that, we'll need to implement support for passing them on the stack.
+ ASSERT(num_of_reg_args <= 8);
+
+ // If we're passing doubles, we're limited to the following prototypes
+ // (defined by ExternalReference::Type):
+ // BUILTIN_COMPARE_CALL: int f(double, double)
+ // BUILTIN_FP_FP_CALL: double f(double, double)
+ // BUILTIN_FP_CALL: double f(double)
+ // BUILTIN_FP_INT_CALL: double f(double, int)
+ if (num_of_double_args > 0) {
+ ASSERT(num_of_reg_args <= 1);
+ ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ }
+
+
+ // If the stack pointer is not csp, we need to derive an aligned csp from the
+ // current stack pointer.
+ const Register old_stack_pointer = StackPointer();
+ if (!csp.Is(old_stack_pointer)) {
+ AssertStackConsistency();
+
+ int sp_alignment = ActivationFrameAlignment();
+ // The ABI mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+
+ // The current stack pointer is a callee saved register, and is preserved
+ // across the call.
+ ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+
+ // Align and synchronize the system stack pointer with jssp.
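+    // Bic with (sp_alignment - 1) clears the low bits of the current stack
+    // pointer, rounding it down to a multiple of sp_alignment.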
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Call directly. The function called cannot cause a GC, or allow preemption,
+ // so the return address in the link register stays correct.
+ Call(function);
+
+ if (!csp.Is(old_stack_pointer)) {
+ if (emit_debug_code()) {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(ActivationFrameAlignment() == 16);
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
+ SetStackPointer(old_stack_pointer);
+ }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+ Br(target);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, Operand(target, rmode));
+ Br(temp);
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+}
+
+
+void MacroAssembler::Call(Register target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Blr(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ Bl(target);
+
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
+#endif
+}
+
+
+// MacroAssembler::CallSize is sensitive to changes in this function, as it
+// needs to know how many instructions are used to branch to the target.
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
+ BlockPoolsScope scope(this);
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+ // Statement positions are expected to be recorded when the target
+ // address is loaded.
+ positions_recorder()->WriteRecordedPositions();
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ if (rmode == RelocInfo::NONE64) {
+ // Addresses are 48 bits so we never need to load the upper 16 bits.
+ uint64_t imm = reinterpret_cast<uint64_t>(target);
+  // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
+ ASSERT(((imm >> 48) & 0xffff) == 0);
+ movz(temp, (imm >> 0) & 0xffff, 0);
+ movk(temp, (imm >> 16) & 0xffff, 16);
+ movk(temp, (imm >> 32) & 0xffff, 32);
+ } else {
+ LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ }
+ Blr(temp);
+#ifdef DEBUG
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
+#endif
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+#ifdef DEBUG
+ Label start_call;
+ Bind(&start_call);
+#endif
+
+ if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+
+ AllowDeferredHandleDereference embedding_raw_address;
+ Call(reinterpret_cast<Address>(code.location()), rmode);
+
+#ifdef DEBUG
+ // Check the size of the code generated.
+ AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
+#endif
+}
+
+
+int MacroAssembler::CallSize(Register target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Label* target) {
+ USE(target);
+ return kInstructionSize;
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
+ USE(target);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ TypeFeedbackId ast_id) {
+ USE(code);
+ USE(ast_id);
+
+ // Addresses always have 64 bits, so we shouldn't encounter NONE32.
+ ASSERT(rmode != RelocInfo::NONE32);
+
+ if (rmode == RelocInfo::NONE64) {
+ return kCallSizeWithoutRelocation;
+ } else {
+ return kCallSizeWithRelocation;
+ }
+}
+
+
+void MacroAssembler::JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number) {
+ ASSERT(on_heap_number || on_not_heap_number);
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ // Load the HeapNumber map if it is not passed.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = temps.AcquireX();
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ } else {
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+
+ ASSERT(!AreAliased(temp, heap_number_map));
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, heap_number_map);
+
+ if (on_heap_number) {
+ B(eq, on_heap_number);
+ }
+ if (on_not_heap_number) {
+ B(ne, on_not_heap_number);
+ }
+}
+
+
+void MacroAssembler::JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ on_heap_number,
+ NULL);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map) {
+ JumpForHeapNumber(object,
+ heap_number_map,
+ NULL,
+ on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
+ FixedArray::kLengthOffset));
+ Asr(mask, mask, 1); // Divide length by two.
+ Sub(mask, mask, 1); // Make mask.
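+  // For example, a cache length of 128 holds 64 (number, string) pairs and
+  // gives a mask of 0x3f.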
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
+ Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
+ Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
+ Eor(scratch1, scratch1, scratch2);
+ And(scratch1, scratch1, mask);
+
+ // Calculate address of entry in string cache: each entry consists of two
+ // pointer sized fields.
+ Add(scratch1, number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ Fcmp(d0, d1);
+ B(ne, not_found);
+ B(&load_result_from_cache);
+
+ Bind(&is_smi);
+ Register scratch = scratch1;
+ And(scratch, mask, Operand::UntagSmi(object));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ Add(scratch, number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Cmp(object, probe);
+ B(ne, not_found);
+
+ // Get the result from the cache.
+ Bind(&load_result_from_cache);
+ Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion) {
+ // Convert to an int and back again, then compare with the original value.
+ Fcvtzs(as_int, value);
+ Scvtf(scratch_d, as_int);
+ Fcmp(value, scratch_d);
+
+ if (on_successful_conversion) {
+ B(on_successful_conversion, eq);
+ }
+ if (on_failed_conversion) {
+ B(on_failed_conversion, ne);
+ }
+}
+
+
+void MacroAssembler::TestForMinusZero(DoubleRegister input) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ // Floating point -0.0 has the same bit pattern as INT64_MIN, so moving it to
+ // an integer register and subtracting 1 (Cmp) will set the overflow (V) flag.
+ Fmov(temp, input);
+ Cmp(temp, 1);
+}
+
+
+void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
+ Label* on_negative_zero) {
+ TestForMinusZero(input);
+ B(vs, on_negative_zero);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
+ // Clamp the value to [0..255].
+ Cmp(input.W(), Operand(input.W(), UXTB));
+ // If input < input & 0xff, it must be < 0, so saturate to 0.
+ Csel(output.W(), wzr, input.W(), lt);
+ // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
+ Csel(output.W(), output.W(), 255, le);
+}
+
+
+void MacroAssembler::ClampInt32ToUint8(Register in_out) {
+ ClampInt32ToUint8(in_out, in_out);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch) {
+ // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
+ // - Inputs lower than 0 (including -infinity) produce 0.
+ // - Inputs higher than 255 (including +infinity) produce 255.
+ // Also, it seems that PIXEL types use round-to-nearest rather than
+ // round-towards-zero.
+
+ // Squash +infinity before the conversion, since Fcvtnu will normally
+ // convert it to 0.
+ Fmov(dbl_scratch, 255);
+ Fmin(dbl_scratch, dbl_scratch, input);
+
+ // Convert double to unsigned integer. Values less than zero become zero.
+ // Values greater than 255 have already been clamped to 255.
+ Fcvtnu(output, dbl_scratch);
+}
+
+
+void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ ASSERT(!AreAliased(dst, src,
+ scratch1, scratch2, scratch3, scratch4, scratch5));
+ ASSERT(count >= 2);
+
+ const Register& remaining = scratch3;
+ Mov(remaining, count / 2);
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ Label loop;
+ Bind(&loop);
+ Ldp(scratch4, scratch5,
+ MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+ Stp(scratch4, scratch5,
+ MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ Sub(remaining, remaining, 1);
+ Cbnz(remaining, &loop);
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch4, MemOperand(src_untagged));
+ Str(scratch4, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields in pairs.
+ for (unsigned i = 0; i < count / 2; i++) {
+ Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
+ Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
+ }
+
+ // Handle the leftovers.
+ if (count & 1) {
+ Ldr(scratch3, MemOperand(src_untagged));
+ Str(scratch3, MemOperand(dst_untagged));
+ }
+}
+
+
+void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
+ Register src,
+ unsigned count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+
+ const Register& dst_untagged = scratch1;
+ const Register& src_untagged = scratch2;
+ Sub(dst_untagged, dst, kHeapObjectTag);
+ Sub(src_untagged, src, kHeapObjectTag);
+
+ // Copy fields one by one.
+ for (unsigned i = 0; i < count; i++) {
+ Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
+ Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
+ }
+}
+
+
+void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
+ unsigned count) {
+ // One of two methods is used:
+ //
+ // For high 'count' values where many scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in a tight loop.
+ //
+ // For low 'count' values or where few scratch registers are available:
+ // Untag src and dst into scratch registers.
+ // Copy src->dst in an unrolled loop.
+ //
+ // In both cases, fields are copied in pairs if possible, and left-overs are
+ // handled separately.
+ ASSERT(!AreAliased(dst, src));
+ ASSERT(!temps.IncludesAliasOf(dst));
+ ASSERT(!temps.IncludesAliasOf(src));
+ ASSERT(!temps.IncludesAliasOf(xzr));
+
+ if (emit_debug_code()) {
+ Cmp(dst, src);
+ Check(ne, kTheSourceAndDestinationAreTheSame);
+ }
+
+ // The value of 'count' at which a loop will be generated (if there are
+ // enough scratch registers).
+ static const unsigned kLoopThreshold = 8;
+
+ UseScratchRegisterScope masm_temps(this);
+ if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
+ CopyFieldsLoopPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() >= 2) {
+ CopyFieldsUnrolledPairsHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else if (temps.Count() == 1) {
+ CopyFieldsUnrolledHelper(dst, src, count,
+ Register(temps.PopLowestIndex()),
+ masm_temps.AcquireX(),
+ masm_temps.AcquireX());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint) {
+ UseScratchRegisterScope temps(this);
+ Register tmp1 = temps.AcquireX();
+ Register tmp2 = temps.AcquireX();
+ ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ ASSERT(!AreAliased(src, dst, csp));
+
+ if (emit_debug_code()) {
+ // Check copy length.
+ Cmp(length, 0);
+ Assert(ge, kUnexpectedNegativeValue);
+
+ // Check src and dst buffers don't overlap.
+ Add(scratch, src, length); // Calculate end of src buffer.
+ Cmp(scratch, dst);
+ Add(scratch, dst, length); // Calculate end of dst buffer.
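+ // If the end of src is above dst (gt), compare the end of dst against src;
+ // otherwise set the Z flag so the 'le' assertion below passes trivially.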
+ Ccmp(scratch, src, ZFlag, gt);
+ Assert(le, kCopyBuffersOverlap);
+ }
+
+ Label short_copy, short_loop, bulk_loop, done;
+
+ if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
+ Register bulk_length = scratch;
+ int pair_size = 2 * kXRegSize;
+ int pair_mask = pair_size - 1;
+
+ Bic(bulk_length, length, pair_mask);
+ Cbz(bulk_length, &short_copy);
+ Bind(&bulk_loop);
+ Sub(bulk_length, bulk_length, pair_size);
+ Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
+ Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
+ Cbnz(bulk_length, &bulk_loop);
+
+ And(length, length, pair_mask);
+ }
+
+ Bind(&short_copy);
+ Cbz(length, &done);
+ Bind(&short_loop);
+ Sub(length, length, 1);
+ Ldrb(tmp1, MemOperand(src, 1, PostIndex));
+ Strb(tmp1, MemOperand(dst, 1, PostIndex));
+ Cbnz(length, &short_loop);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::FillFields(Register dst,
+ Register field_count,
+ Register filler) {
+ ASSERT(!dst.Is(csp));
+ UseScratchRegisterScope temps(this);
+ Register field_ptr = temps.AcquireX();
+ Register counter = temps.AcquireX();
+ Label done;
+
+ // Decrement count. If the result < zero, count was zero, and there's nothing
+ // to do. If count was one, flags are set to fail the gt condition at the end
+ // of the pairs loop.
+ Subs(counter, field_count, 1);
+ B(lt, &done);
+
+ // There's at least one field to fill, so do this unconditionally.
+ Str(filler, MemOperand(dst, kPointerSize, PostIndex));
+
+ // If the bottom bit of counter is set, there are an even number of fields to
+ // fill, so pull the start pointer back by one field, allowing the pairs loop
+ // to overwrite the field that was stored above.
+ And(field_ptr, counter, 1);
+ Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
+
+ // Store filler to memory in pairs.
+ Label entry, loop;
+ B(&entry);
+ Bind(&loop);
+ Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
+ Subs(counter, counter, 2);
+ Bind(&entry);
+ B(gt, &loop);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check) {
+
+ if (smi_check == DO_SMI_CHECK) {
+ JumpIfEitherSmi(first, second, failure);
+ } else if (emit_debug_code()) {
+ ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ Label not_smi;
+ JumpIfEitherSmi(first, second, NULL, &not_smi);
+
+ // At least one input is a smi, but the flags indicated a smi check wasn't
+ // needed.
+ Abort(kUnexpectedSmi);
+
+ Bind(&not_smi);
+ }
+
+ // Test that both first and second are sequential ASCII strings.
+ Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(scratch1, second));
+ ASSERT(!AreAliased(scratch1, scratch2));
+ static const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, kFlatAsciiStringMask);
+ Cmp(scratch, kFlatAsciiStringTag);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch1, first, kFlatAsciiStringMask);
+ And(scratch2, second, kFlatAsciiStringMask);
+ Cmp(scratch1, kFlatAsciiStringTag);
+ Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ B(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueName(Register type,
+ Label* not_unique_name) {
+ STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
+ // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
+ // continue
+ // } else {
+ // goto not_unique_name
+ // }
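+ //
+ // The Ccmp performs the SYMBOL_TYPE comparison only when the Tst result is
+ // 'ne' (not an internalized string); otherwise it sets the Z flag so that
+ // the final 'ne' branch is not taken.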
+ Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
+ Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
+ B(ne, not_unique_name);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper) {
+ bool definitely_matches = false;
+ *definitely_mismatches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline:
+ // x0: actual arguments count.
+ // x1: function (passed through to callee).
+ // x2: expected arguments count.
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(x0));
+ ASSERT(expected.is_immediate() || expected.reg().is(x2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+
+ } else {
+ Mov(x0, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ *definitely_mismatches = true;
+ // Set up x2 for the argument adaptor.
+ Mov(x2, expected.immediate());
+ }
+ }
+
+ } else { // expected is a register.
+ Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
+ : Operand(actual.reg());
+ // If actual == expected perform a regular invocation.
+ Cmp(expected.reg(), actual_op);
+ B(eq, &regular_invoke);
+ // Otherwise set up x0 for the argument adaptor.
+ Mov(x0, actual_op);
+ }
+
+ // If the argument counts may mismatch, generate a call to the argument
+ // adaptor.
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ Mov(x3, Operand(code_constant));
+ Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(adaptor));
+ Call(adaptor);
+ call_wrapper.AfterCall();
+ if (!*definitely_mismatches) {
+ // If the arg counts don't match, no extra code is emitted by
+ // MAsm::InvokeCode and we can just fall through.
+ B(done);
+ }
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ }
+ Bind(&regular_invoke);
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ Label done;
+
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ &definitely_mismatches, call_wrapper);
+
+ // If we are certain that actual != expected, then we know InvokePrologue will
+ // have handled the call through the argument adaptor mechanism.
+ // The called function expects the call kind in x5.
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ Bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.is(x1));
+
+ Register expected_reg = x2;
+ Register code_reg = x3;
+
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The number of arguments is stored as an int32_t, and -1 is a marker
+ // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
+ // extension to correctly handle it.
+ Ldr(expected_reg, FieldMemOperand(function,
+ JSFunction::kSharedFunctionInfoOffset));
+ Ldrsw(expected_reg,
+ FieldMemOperand(expected_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ Ldr(code_reg,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ ASSERT(function.Is(x1));
+
+ Register code_reg = x3;
+
+ // Set up the context.
+ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // Contract with called JS functions requires that function is passed in x1.
+ // (See FullCodeGenerator::Generate().)
+ __ LoadObject(x1, function);
+ InvokeFunction(x1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::TryConvertDoubleToInt64(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ // Try to convert with an FPU convert instruction. It's trivial to compute
+ // the modulo operation on an integer register so we convert to a 64-bit
+ // integer.
+ //
+ // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
+ // when the double is out of range. NaNs and infinities will be converted to 0
+ // (as ECMA-262 requires).
+ Fcvtzs(result.X(), double_input);
+
+ // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
+ // representable using a double, so if the result is one of those then we know
+ // that saturation occurred, and we need to manually handle the conversion.
+ //
+ // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
+ // 1 will cause signed overflow.
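+ //
+ // Here, Cmp sets the V flag iff the result is INT64_MIN (result - 1
+ // overflows). When it does not (vc), the Ccmp sets V iff the result is
+ // INT64_MAX (result + 1 overflows), so 'vc' after both means no saturation.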
+ Cmp(result.X(), 1);
+ Ccmp(result.X(), -1, VFlag, vc);
+
+ B(vc, done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, double_input, &done);
+
+ // If we fell through, the inline version didn't succeed, so call the stub instead.
+ Push(lr);
+ Push(double_input); // Put input on stack.
+
+ DoubleToIStub stub(jssp,
+ result,
+ 0,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+
+ Drop(1, kDoubleSize); // Drop the double input on the stack.
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ ASSERT(!result.is(object));
+ ASSERT(jssp.Is(StackPointer()));
+
+ Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // Try to convert the double to an int64. If successful, the bottom 32 bits
+ // contain our truncated int32 result.
+ TryConvertDoubleToInt64(result, fp_scratch, &done);
+
+ // If we fell through, the inline version didn't succeed, so call the stub instead.
+ Push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true, // is_truncating
+ true); // skip_fastpath
+ CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
+ Pop(lr);
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ ASSERT(StackPointer().Is(jssp));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(StackFrame::STUB));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, temp);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ if (isolate()->IsCodePreAgingActive()) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
+ } else {
+ __ EmitFrameSetupForCodeAgePatching();
+ }
+ }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ UseScratchRegisterScope temps(this);
+ Register type_reg = temps.AcquireX();
+ Register code_reg = temps.AcquireX();
+
+ Push(lr, fp, cp);
+ Mov(type_reg, Smi::FromInt(type));
+ Mov(code_reg, Operand(CodeObject()));
+ Push(type_reg, code_reg);
+ // jssp[4] : lr
+ // jssp[3] : fp
+ // jssp[2] : cp
+ // jssp[1] : type
+ // jssp[0] : code object
+
+ // Adjust FP to point to saved FP.
+ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ Mov(jssp, fp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::ExitFramePreserveFPRegs() {
+ PushCPURegList(kCallerSavedFP);
+}
+
+
+void MacroAssembler::ExitFrameRestoreFPRegs() {
+ // Read the registers from the stack without popping them. The stack pointer
+ // will be reset as part of the unwinding process.
+ CPURegList saved_fp_regs = kCallerSavedFP;
+ ASSERT(saved_fp_regs.Count() % 2 == 0);
+
+ int offset = ExitFrameConstants::kLastExitFrameField;
+ while (!saved_fp_regs.IsEmpty()) {
+ const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
+ const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
+ offset -= 2 * kDRegSize;
+ Ldp(dst1, dst0, MemOperand(fp, offset));
+ }
+}
+
+
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space) {
+ ASSERT(jssp.Is(StackPointer()));
+
+ // Set up the new stack frame.
+ Mov(scratch, Operand(CodeObject()));
+ Push(lr, fp);
+ Mov(fp, StackPointer());
+ Push(xzr, scratch);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // jssp -> fp[-16]: CodeObject()
+ STATIC_ASSERT((2 * kPointerSize) ==
+ ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
+ STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
+ STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
+
+ // Save the frame pointer and context pointer in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(fp, MemOperand(scratch));
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(cp, MemOperand(scratch));
+
+ STATIC_ASSERT((-2 * kPointerSize) ==
+ ExitFrameConstants::kLastExitFrameField);
+ if (save_doubles) {
+ ExitFramePreserveFPRegs();
+ }
+
+ // Reserve space for the return address and for user requested memory.
+ // We do this before aligning to make sure that we end up correctly
+ // aligned with the minimum of wasted space.
+ Claim(extra_space + 1, kXRegSize);
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // jssp[8]: Extra space reserved for caller (if extra_space != 0).
+ // jssp -> jssp[0]: Space reserved for the return address.
+
+ // Align and synchronize the system stack pointer with jssp.
+ AlignAndSetCSPForFrame();
+ ASSERT(csp.Is(StackPointer()));
+
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: Space reserved for SPOffset.
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+
+ // ExitFrame::GetStateForFramePointer expects to find the return address at
+ // the memory address immediately below the pointer stored in SPOffset.
+ // It is not safe to derive much else from SPOffset, because the size of the
+ // padding can vary.
+ Add(scratch, csp, kXRegSize);
+ Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+// Leave the current exit frame.
+void MacroAssembler::LeaveExitFrame(bool restore_doubles,
+ const Register& scratch,
+ bool restore_context) {
+ ASSERT(csp.Is(StackPointer()));
+
+ if (restore_doubles) {
+ ExitFrameRestoreFPRegs();
+ }
+
+ // Restore the context pointer from the top frame.
+ if (restore_context) {
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Ldr(cp, MemOperand(scratch));
+ }
+
+ if (emit_debug_code()) {
+ // Also emit debug code to clear the cp in the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+ }
+ // Clear the frame pointer from the top frame.
+ Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+ isolate())));
+ Str(xzr, MemOperand(scratch));
+
+ // Pop the exit frame.
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[...]: The rest of the frame.
+ Mov(jssp, fp);
+ SetStackPointer(jssp);
+ AssertStackConsistency();
+ Pop(fp, lr);
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch1, value);
+ Mov(scratch2, ExternalReference(counter));
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value != 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Mov(scratch2, ExternalReference(counter));
+ Ldr(scratch1, MemOperand(scratch2));
+ Add(scratch1, scratch1, value);
+ Str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ IncrementCounter(counter, -value, scratch1, scratch2);
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ Mov(dst, cp);
+ }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::DebugBreak() {
+ Mov(x0, 0);
+ Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
+ CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+}
+#endif
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+ int handler_index) {
+ ASSERT(jssp.Is(StackPointer()));
+ // Adjust this code if the asserts don't hold.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve the live registers x0-x4.
+ // (See JSEntryStub::GenerateBody().)
+
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+
+ // Set up the code object and the state for pushing.
+ Mov(x10, Operand(CodeObject()));
+ Mov(x11, state);
+
+ // Push the frame pointer, context, state, and code object.
+ if (kind == StackHandler::JS_ENTRY) {
+ ASSERT(Smi::FromInt(0) == 0);
+ Push(xzr, xzr, x11, x10);
+ } else {
+ Push(fp, cp, x11, x10);
+ }
+
+ // Link the current handler as the next handler.
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Ldr(x10, MemOperand(x11));
+ Push(x10);
+ // Set this new handler as the current one.
+ Str(jssp, MemOperand(x11));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(x10);
+ Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
+ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+ Str(x10, MemOperand(x11));
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
+ ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT(0 == (object_size & kObjectAlignmentMask));
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up the allocation top address and allocation limit registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ Adds(scratch3, result, object_size);
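+ // Adds sets the flags, so the following 'vs' branch also catches an overflow
+ // in the top + size addition itself.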
+ B(vs, gc_required);
+ Cmp(scratch3, allocation_limit);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ // We apply salt to the original zap value to easily spot the values.
+ Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
+ Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ }
+ B(gc_required);
+ return;
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register scratch3 = temps.AcquireX();
+
+ ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Check relative positions of allocation top and limit addresses.
+ // The values must be adjacent in memory to allow the use of LDP.
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference heap_allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up the allocation top address and allocation limit registers.
+ Register top_address = scratch1;
+ Register allocation_limit = scratch2;
+ Mov(top_address, heap_allocation_top);
+
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and the allocation limit.
+ Ldp(result, allocation_limit, MemOperand(top_address));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry.
+ Ldr(scratch3, MemOperand(top_address));
+ Cmp(result, scratch3);
+ Check(eq, kUnexpectedAllocationTop);
+ }
+ // Load the allocation limit. 'result' already contains the allocation top.
+ Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ }
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and bail out if new space is exhausted.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
+ } else {
+ Adds(scratch3, result, object_size);
+ }
+
+ if (emit_debug_code()) {
+ Tst(scratch3, kObjectAlignmentMask);
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+
+ B(vs, gc_required);
+ Cmp(scratch3, allocation_limit);
+ B(hi, gc_required);
+ Str(scratch3, MemOperand(top_address));
+
+ // Tag the object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Orr(result, result, kHeapObjectTag);
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ Bic(object, object, kHeapObjectTagMask);
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ Mov(scratch, new_space_allocation_top);
+ Ldr(scratch, MemOperand(scratch));
+ Cmp(object, scratch);
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ Mov(scratch, new_space_allocation_top);
+ Str(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ Add(scratch1, length, length); // Length in bytes, not chars.
+ Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate two-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT(kCharSize == 1);
+ Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ Bic(scratch1, scratch1, kObjectAlignmentMask);
+
+ // Allocate ASCII string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ Mov(scratch1, high_promotion_mode);
+ Ldr(scratch1, MemOperand(scratch1));
+ Cbz(scratch1, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ B(&install_map);
+
+ Bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ Bind(&install_map);
+
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result,
+ length,
+ Heap::kSlicedAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+ // Allocates a heap number or jumps to the gc_required label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ if (heap_number_map.Is(NoReg)) {
+ heap_number_map = scratch1;
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ }
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map) {
+ // TODO(all): Check if it would be more efficient to use STP to store both
+ // the map and the value.
+ AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
+ Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void MacroAssembler::JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond) {
+ CompareObjectType(object, map, type_reg, type);
+ B(cond, if_cond_pass);
+}
+
+
+void MacroAssembler::JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object) {
+ JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, type_reg, type);
+}
+
+
+// Sets condition flags based on comparison, and returns type in type_reg.
+void MacroAssembler::CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type) {
+ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Cmp(type_reg, type);
+}
+
+
+void MacroAssembler::CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map) {
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareMap(scratch, map);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map,
+ Handle<Map> map) {
+ Cmp(obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+
+ CompareMap(obj, scratch, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ JumpIfNotRoot(scratch, index, fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj_map, fail);
+ }
+
+ CompareMap(obj_map, map);
+ B(ne, fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Cmp(scratch, Operand(map));
+ B(ne, &fail);
+ Jump(success, RelocInfo::CODE_TARGET);
+ Bind(&fail);
+}
+
+
+void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ Tst(temp, mask);
+}
+
+
+void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
+ // Load the map's "bit field 2".
+ __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action) {
+ ASSERT(!AreAliased(function, result, scratch));
+
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
+ if (action == kMissOnBoundFunction) {
+ Register scratch_w = scratch.W();
+ Ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // On 64-bit platforms, compiler hints field is not a smi. See definition of
+ // kCompilerHintsOffset in src/objects.h.
+ Ldr(scratch_w,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
+ }
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ Ldr(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and simply
+ // miss the cache instead. This will allow us to allocate a prototype object
+ // on-demand in the runtime system.
+ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
+
+ // Get the prototype from the initial map.
+ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ Bind(&done);
+}
+
+
+void MacroAssembler::CompareRoot(const Register& obj,
+ Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ ASSERT(!AreAliased(obj, temp));
+ LoadRoot(temp, index);
+ Cmp(obj, temp);
+}
+
+
+void MacroAssembler::JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal) {
+ CompareRoot(obj, index);
+ B(eq, if_equal);
+}
+
+
+void MacroAssembler::JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(obj, index);
+ B(ne, if_not_equal);
+}
+
+
+void MacroAssembler::CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if ((if_true == if_false) && (if_false == fall_through)) {
+ // Fall through.
+ } else if (if_true == if_false) {
+ B(if_true);
+ } else if (if_false == fall_through) {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ } else if (if_true == fall_through) {
+ CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ } else {
+ CompareAndBranch(lhs, rhs, cond, if_true);
+ B(if_false);
+ }
+}
+
+
+void MacroAssembler::TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through) {
+ if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
+ // Fall through.
+ } else if (if_all_clear == if_any_set) {
+ B(if_all_clear);
+ } else if (if_all_clear == fall_through) {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ } else if (if_any_set == fall_through) {
+ TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+ } else {
+ TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
+ B(if_all_clear);
+ }
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
+ B(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+ // If cond==ls, set cond=hi, otherwise compare.
+ Ccmp(scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
+ B(hi, fail);
+}
+
+
+// Note: The ARM version of this clobbers elements_reg, but this version does
+// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset) {
+ ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ Label store_num;
+
+ // Speculatively convert the smi to a double - all smis can be exactly
+ // represented as a double.
+ SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
+
+ // If value_reg is a smi, we're done.
+ JumpIfSmi(value_reg, &store_num);
+
+ // Ensure that the object is a heap number.
+ CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
+ fail, DONT_DO_SMI_CHECK);
+
+ Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ // Check for NaN by comparing the number to itself: NaN comparison will
+ // report unordered, indicated by the overflow flag being set.
+ Fcmp(fpscratch1, fpscratch1);
+ Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
+
+ // Store the result.
+ Bind(&store_num);
+ Add(scratch1, elements_reg,
+ Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
+ Str(fpscratch1,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it do not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in 'index'. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ SmiTag(index, hash);
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(
+ Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask) {
+ ASSERT(!AreAliased(string, index, scratch));
+
+ if (index_type == kIndexIsSmi) {
+ AssertSmi(index);
+ }
+
+ // Check that string is an object.
+ AssertNotSmi(string, kNonObject);
+
+ // Check that string has an appropriate map.
+ Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+ And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
+ Cmp(scratch, encoding_mask);
+ Check(eq, kUnexpectedStringType);
+
+ Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
+ Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
+ Check(lt, kIndexIsTooLarge);
+
+ ASSERT_EQ(0, Smi::FromInt(0));
+ Cmp(index, 0);
+ Check(ge, kIndexIsNegative);
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ Label same_contexts;
+
+ // Load current lexical context from the stack frame.
+ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Cmp(scratch1, 0);
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // Read the first word and compare to the global_context_map.
+ Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+ CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ }
+
+ // Check if both contexts are the same.
+ Ldr(scratch2, FieldMemOperand(holder_reg,
+ JSGlobalProxy::kNativeContextOffset));
+ Cmp(scratch1, scratch2);
+ B(&same_contexts, eq);
+
+ // Check the context is a native context.
+ if (emit_debug_code()) {
+ // We're short on scratch registers here, so use holder_reg as a scratch.
+ Push(holder_reg);
+ Register scratch3 = holder_reg;
+
+ CompareRoot(scratch2, Heap::kNullValueRootIndex);
+ Check(ne, kExpectedNonNullContext);
+
+ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
+ Check(eq, kExpectedNativeContext);
+ Pop(holder_reg);
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
+ Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
+ Cmp(scratch1, scratch2);
+ B(miss, ne);
+
+ Bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+ // code-stubs-hydrogen.cc.
+void MacroAssembler::GetNumberHash(Register key, Register scratch) {
+ ASSERT(!AreAliased(key, scratch));
+
+ // Xor original key with a seed.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ Eor(key, key, Operand::UntagSmi(scratch));
+
+ // The algorithm uses 32-bit integer values.
+ key = key.W();
+ scratch = scratch.W();
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ Mvn(scratch, key);
+ Add(key, scratch, Operand(key, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ Eor(key, key, Operand(key, LSR, 12));
+ // hash = hash + (hash << 2);
+ Add(key, key, Operand(key, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ Eor(key, key, Operand(key, LSR, 4));
+ // hash = hash * 2057;
+ Mov(scratch, Operand(key, LSL, 11));
+ Add(key, key, Operand(key, LSL, 3));
+ Add(key, key, scratch);
+ // hash = hash ^ (hash >> 16);
+ Eor(key, key, Operand(key, LSR, 16));
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+
+ Label done;
+
+ SmiUntag(scratch0, key);
+ GetNumberHash(scratch0, scratch1);
+
+ // Compute the capacity mask.
+ Ldrsw(scratch1,
+ UntagSmiFieldMemOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
+ Sub(scratch1, scratch1, 1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
+ } else {
+ Mov(scratch2, scratch0);
+ }
+ And(scratch2, scratch2, scratch1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+ // Check if the key is identical to the name.
+ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
+ Ldr(scratch3,
+ FieldMemOperand(scratch2,
+ SeededNumberDictionary::kElementsStartOffset));
+ Cmp(key, scratch3);
+ if (i != (kNumberDictionaryProbes - 1)) {
+ B(eq, &done);
+ } else {
+ B(ne, miss);
+ }
+ }
+
+ Bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+ Ldr(result, FieldMemOperand(scratch2, kValueOffset));
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch1,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ ASSERT(!AreAliased(object, address, scratch1));
+ Label done, store_buffer_overflow;
+ if (emit_debug_code()) {
+ Label ok;
+ JumpIfNotInNewSpace(object, &ok);
+ Abort(kRememberedSetPointerInNewSpace);
+ bind(&ok);
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch2 = temps.AcquireX();
+
+ // Load store buffer top.
+ Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
+ Ldr(scratch1, MemOperand(scratch2));
+ // Store pointer to buffer and increment buffer top.
+ Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ Str(scratch1, MemOperand(scratch2));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
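+ // The top address has the overflow bit set once it crosses the end of the
+ // store buffer, so testing that single bit is enough to detect overflow.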
+ ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ (1 << (14 + kPointerSizeLog2)));
+ if (and_then == kFallThroughAtEnd) {
+ Tbz(scratch1, (14 + kPointerSizeLog2), &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
+ Ret();
+ }
+
+ Bind(&store_buffer_overflow);
+ Push(lr);
+ StoreBufferOverflowStub store_buffer_overflow_stub =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow_stub);
+ Pop(lr);
+
+ Bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ PopXRegList(kSafepointSavedRegisters);
+ Drop(num_unsaved);
+}
+
+
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
+ // adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Claim(num_unsaved);
+ PushXRegList(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ FPRegister::kAllocatableFPRegisters));
+ PopSafepointRegisters();
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // Make sure the safepoint registers list is what we expect.
+ ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+
+ // Safepoint registers are stored contiguously on the stack, but not all the
+ // registers are saved. The following registers are excluded:
+ // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
+ // the macro assembler.
+ // - x28 (jssp) because JS stack pointer doesn't need to be included in
+ // safepoint registers.
+ // - x31 (csp) because the system stack pointer doesn't need to be included
+ // in safepoint registers.
+ //
+ // This function implements the mapping of register code to index into the
+ // safepoint register slots.
+ if ((reg_code >= 0) && (reg_code <= 15)) {
+ return reg_code;
+ } else if ((reg_code >= 18) && (reg_code <= 27)) {
+ // Skip ip0 and ip1.
+ return reg_code - 2;
+ } else if ((reg_code == 29) || (reg_code == 30)) {
+ // Also skip jssp.
+ return reg_code - 3;
+ } else {
+ // This register has no safepoint register slot.
+ UNREACHABLE();
+ return -1;
+ }
+}
+
+
+void MacroAssembler::CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set) {
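+ // Clearing the page-offset bits of the object address yields the address of
+ // the containing MemoryChunk, whose header holds the page flags.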
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch, mask, if_any_set);
+}
+
+
+void MacroAssembler::CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ TestAndBranchIfAllClear(scratch, mask, if_all_clear);
+}
+
+
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip the barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ Add(scratch, object, offset - kHeapObjectTag);
+ if (emit_debug_code()) {
+ Label ok;
+ Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ B(eq, &ok);
+ Abort(kUnalignedCellInWriteBarrier);
+ Bind(&ok);
+ }
+
+ RecordWrite(object,
+ scratch,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
+
+ Bind(&done);
+
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, address, value.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+//
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASM_LOCATION("MacroAssembler::RecordWrite");
+ ASSERT(!AreAliased(object, value));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, MemOperand(address));
+ Cmp(temp, value);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ CheckPageFlagClear(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::AssertHasValidColor(const Register& reg) {
+ if (emit_debug_code()) {
+ // The bit sequence is backward. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label color_is_valid;
+ Tbnz(reg, 0, &color_is_valid);
+ Tbz(reg, 1, &color_is_valid);
+ Abort(kUnexpectedColorFound);
+ Bind(&color_is_valid);
+ }
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ // addr_reg is divided into fields:
+ // |63 page base 20|19 high 8|7 shift 3|2 0|
+ // 'high' gives the index of the cell holding color bits for the object.
+ // 'shift' gives the offset in the cell for this object's color.
+ const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+ Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
+ Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
+ // bitmap_reg:
+ // |63 page base 20|19 zeros 15|14 high 3|2 0|
+ Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ // See mark-compact.h for color definitions.
+ ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+
+ GetMarkBits(object, bitmap_scratch, shift_scratch);
+ Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Shift the bitmap down to get the color of the object in bits [1:0].
+ Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
+
+ AssertHasValidColor(bitmap_scratch);
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ // Check for the color.
+ if (first_bit == 0) {
+ // Checking for white.
+ ASSERT(second_bit == 0);
+ // We only need to test the first bit.
+ Tbz(bitmap_scratch, 0, has_color);
+ } else {
+ Label other_color;
+ // Checking for grey or black.
+ Tbz(bitmap_scratch, 0, &other_color);
+ if (second_bit == 0) {
+ Tbz(bitmap_scratch, 1, has_color);
+ } else {
+ Tbnz(bitmap_scratch, 1, has_color);
+ }
+ Bind(&other_color);
+ }
+
+ // Fall through if it does not have the right color.
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Mov(scratch, Operand(map));
+ Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
+ }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!AreAliased(object, scratch0, scratch1));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // 'current' starts at the object itself and walks up the prototype chain.
+ Mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ Bind(&loop_again);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+}
+
+
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
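+ // The imm19 field of an LDR (literal) instruction occupies bits [23:5] and
+ // encodes the offset from the instruction, scaled by the word size.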
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register shift_scratch,
+ Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(
+ value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
+
+ // These bit sequences are backwards. The first character in the string
+ // represents the least significant bit.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+
+ GetMarkBits(value, bitmap_scratch, shift_scratch);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lsr(load_scratch, load_scratch, shift_scratch);
+
+ AssertHasValidColor(load_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ Label done;
+ Tbnz(load_scratch, 0, &done);
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ Register map = load_scratch; // Holds map while checking type.
+ Label is_data_object;
+
+ // Check for heap-number.
+ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ Mov(length_scratch, HeapNumber::kSize);
+ JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ TestAndBranchIfAnySet(instance_type,
+ kIsIndirectStringMask | kIsNotStringMask,
+ value_is_white_and_not_data);
+
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ Mov(length_scratch, ExternalString::kSize);
+ TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // The length is loaded untagged below; for UC16 (char-size of 2) it is then
+ // shifted left by one to get the size in bytes, while for ASCII
+ // (char-size of 1) it is left unshifted.
+ ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
+ String::kLengthOffset));
+ Tst(instance_type, kStringEncodingMask);
+ Cset(load_scratch, eq);
+ Lsl(length_scratch, length_scratch, load_scratch);
+ Add(length_scratch,
+ length_scratch,
+ SeqString::kHeaderSize + kObjectAlignmentMask);
+ Bic(length_scratch, length_scratch, kObjectAlignmentMask);
+
+ Bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ Register mask = shift_scratch;
+ Mov(load_scratch, 1);
+ Lsl(mask, load_scratch, shift_scratch);
+
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Orr(load_scratch, load_scratch, mask);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
+ Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Add(load_scratch, load_scratch, length_scratch);
+ Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ Bind(&done);
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
+ if (emit_debug_code()) {
+ Check(cond, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
+ if (emit_debug_code()) {
+ CheckRegisterIsClear(reg, reason);
+ }
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason) {
+ if (emit_debug_code()) {
+ CompareRoot(reg, index);
+ Check(eq, reason);
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Label ok;
+ Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
+ JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
+ JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Bind(&ok);
+ }
+}
+
+
+void MacroAssembler::AssertIsString(const Register& object) {
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ STATIC_ASSERT(kSmiTag == 0);
+ Tst(object, kSmiTagMask);
+ Check(ne, kOperandIsNotAString);
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
+ Check(lo, kOperandIsNotAString);
+ }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
+ Label ok;
+ B(cond, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
+ Label ok;
+ Cbz(reg, &ok);
+ Abort(reason);
+ // Will not return here.
+ Bind(&ok);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+#ifdef DEBUG
+ RecordComment("Abort message: ");
+ RecordComment(GetBailoutReason(reason));
+
+ if (FLAG_trap_on_abort) {
+ Brk(0);
+ return;
+ }
+#endif
+
+ // Abort is used in some contexts where csp is the stack pointer. In order to
+ // simplify the CallRuntime code, make sure that jssp is the stack pointer.
+ // There is no risk of register corruption here because Abort doesn't return.
+ Register old_stack_pointer = StackPointer();
+ SetStackPointer(jssp);
+ Mov(jssp, old_stack_pointer);
+
+ // We need some scratch registers for the MacroAssembler, so make sure we have
+ // some. This is safe here because Abort never returns.
+ RegList old_tmp_list = TmpList()->list();
+ TmpList()->Combine(ip0);
+ TmpList()->Combine(ip1);
+
+ if (use_real_aborts()) {
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ NoUseRealAbortsScope no_real_aborts(this);
+
+ Mov(x0, Smi::FromInt(reason));
+ Push(x0);
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 1);
+ } else {
+ CallRuntime(Runtime::kAbort, 1);
+ }
+ } else {
+ // Load the string to pass to Printf.
+ Label msg_address;
+ Adr(x0, &msg_address);
+
+ // Call Printf directly to report the error.
+ CallPrintf();
+
+ // We need a way to stop execution on both the simulator and real hardware,
+ // and Unreachable() is the best option.
+ Unreachable();
+
+ // Emit the message string directly in the instruction stream.
+ {
+ BlockPoolsScope scope(this);
+ Bind(&msg_address);
+ EmitStringData(GetBailoutReason(reason));
+ }
+ }
+
+ SetStackPointer(old_stack_pointer);
+ TmpList()->set_list(old_tmp_list);
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ Ldr(scratch1, GlobalObjectMemOperand());
+ Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
+ size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(scratch2, FieldMemOperand(scratch1, offset));
+ Cmp(map_in_out, scratch2);
+ B(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+ Ldr(map_in_out, FieldMemOperand(scratch1, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ Ldr(function, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ Ldr(function, FieldMemOperand(function,
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ Ldr(function, ContextMemOperand(function, index));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ B(&ok);
+ Bind(&fail);
+ Abort(kGlobalFunctionsMustHaveInitialMap);
+ Bind(&ok);
+ }
+}
+
+
+// This is the main Printf implementation. All other Printf variants call
+// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
+void MacroAssembler::PrintfNoPreserve(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // We cannot handle a caller-saved stack pointer. It doesn't make much sense
+ // in most cases anyway, so this restriction shouldn't be too serious.
+ ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // Make sure that the macro assembler doesn't try to use any of our arguments
+ // as scratch registers.
+ ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+ ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+
+ // We cannot print the stack pointer because it is typically used to preserve
+ // caller-saved registers (using other Printf variants which depend on this
+ // helper).
+ ASSERT(!AreAliased(arg0, StackPointer()));
+ ASSERT(!AreAliased(arg1, StackPointer()));
+ ASSERT(!AreAliased(arg2, StackPointer()));
+ ASSERT(!AreAliased(arg3, StackPointer()));
+
+ static const int kMaxArgCount = 4;
+ // Assume that we have the maximum number of arguments until we know
+ // otherwise.
+ int arg_count = kMaxArgCount;
+
+ // The provided arguments.
+ CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
+
+ // The PCS registers where the arguments need to end up.
+ CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
+
+ // Promote FP arguments to doubles, and integer arguments to X registers.
+ // Note that FP and integer arguments cannot be mixed, but we'll check
+ // AreSameSizeAndType once we've processed these promotions.
+ for (int i = 0; i < kMaxArgCount; i++) {
+ if (args[i].IsRegister()) {
+ // Note that we use x1 onwards, because x0 will hold the format string.
+ pcs[i] = Register::XRegFromCode(i + 1);
+ // For simplicity, we handle all integer arguments as X registers. An X
+ // register argument takes the same space as a W register argument in the
+ // PCS anyway. The only limitation is that we must explicitly clear the
+ // top word for W register arguments as the callee will expect it to be
+ // clear.
+ if (!args[i].Is64Bits()) {
+ const Register& as_x = args[i].X();
+ And(as_x, as_x, 0x00000000ffffffff);
+ args[i] = as_x;
+ }
+ } else if (args[i].IsFPRegister()) {
+ pcs[i] = FPRegister::DRegFromCode(i);
+ // C and C++ varargs functions (such as printf) implicitly promote float
+ // arguments to doubles.
+ if (!args[i].Is64Bits()) {
+ FPRegister s(args[i]);
+ const FPRegister& as_d = args[i].D();
+ Fcvt(as_d, s);
+ args[i] = as_d;
+ }
+ } else {
+ // This is the first empty (NoCPUReg) argument, so use it to set the
+ // argument count and bail out.
+ arg_count = i;
+ break;
+ }
+ }
+ ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
+ // Check that every remaining argument is NoCPUReg.
+ for (int i = arg_count; i < kMaxArgCount; i++) {
+ ASSERT(args[i].IsNone());
+ }
+ ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
+ args[2], args[3],
+ pcs[0], pcs[1],
+ pcs[2], pcs[3]));
+
+ // Move the arguments into the appropriate PCS registers.
+ //
+ // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
+ // surprisingly complicated.
+ //
+ // * For even numbers of registers, we push the arguments and then pop them
+ // into their final registers. This maintains 16-byte stack alignment in
+ // case csp is the stack pointer, since we're only handling X or D
+ // registers at this point.
+ //
+ // * For odd numbers of registers, we push and pop all but one register in
+ // the same way, but the left-over register is moved directly, since we
+ // can always safely move one register without clobbering any source.
+ if (arg_count >= 4) {
+ Push(args[3], args[2], args[1], args[0]);
+ } else if (arg_count >= 2) {
+ Push(args[1], args[0]);
+ }
+
+ if ((arg_count % 2) != 0) {
+ // Move the left-over register directly.
+ const CPURegister& leftover_arg = args[arg_count - 1];
+ const CPURegister& leftover_pcs = pcs[arg_count - 1];
+ if (leftover_arg.IsRegister()) {
+ Mov(Register(leftover_pcs), Register(leftover_arg));
+ } else {
+ Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+ }
+ }
+
+ if (arg_count >= 4) {
+ Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
+ } else if (arg_count >= 2) {
+ Pop(pcs[0], pcs[1]);
+ }
+
+ // Load the format string into x0, as per the procedure-call standard.
+ //
+ // To make the code as portable as possible, the format string is encoded
+ // directly in the instruction stream. It might be cleaner to encode it in a
+ // literal pool, but since Printf is usually used for debugging, it is
+ // beneficial for it to be minimally dependent on other features.
+ Label format_address;
+ Adr(x0, &format_address);
+
+ // Emit the format string directly in the instruction stream.
+ { BlockPoolsScope scope(this);
+ Label after_data;
+ B(&after_data);
+ Bind(&format_address);
+ EmitStringData(format);
+ Unreachable();
+ Bind(&after_data);
+ }
+
+ // We don't pass any arguments on the stack, but we still need to align the C
+ // stack pointer to a 16-byte boundary for PCS compliance.
+ if (!csp.Is(StackPointer())) {
+ Bic(csp, StackPointer(), 0xf);
+ }
+
+ CallPrintf(pcs[0].type());
+}
+
+
+void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+ // A call to printf needs special handling for the simulator, since the system
+ // printf function will use a different instruction set and the procedure-call
+ // standard will not be compatible.
+#ifdef USE_SIMULATOR
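+ // The simulator implements this as a pseudo-instruction: an hlt with a
+ // dedicated immediate, followed by a 32-bit word describing the argument
+ // type, which the simulator decodes before forwarding the call to the host
+ // printf.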
+ { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ hlt(kImmExceptionIsPrintf);
+ dc32(type);
+ }
+#else
+ Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
+#endif
+}
+
+
+void MacroAssembler::Printf(const char * format,
+ const CPURegister& arg0,
+ const CPURegister& arg1,
+ const CPURegister& arg2,
+ const CPURegister& arg3) {
+ // Printf is expected to preserve all registers, so make sure that none are
+ // available as scratch registers until we've preserved them.
+ RegList old_tmp_list = TmpList()->list();
+ RegList old_fp_tmp_list = FPTmpList()->list();
+ TmpList()->set_list(0);
+ FPTmpList()->set_list(0);
+
+ // Preserve all caller-saved registers as well as NZCV.
+ // If csp is the stack pointer, PushCPURegList asserts that the size of each
+ // list is a multiple of 16 bytes.
+ PushCPURegList(kCallerSaved);
+ PushCPURegList(kCallerSavedFP);
+
+ // We can use caller-saved registers as scratch values (except for argN).
+ CPURegList tmp_list = kCallerSaved;
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ // Preserve NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+ }
+
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
+
+ PopCPURegList(kCallerSavedFP);
+ PopCPURegList(kCallerSaved);
+
+ TmpList()->set_list(old_tmp_list);
+ FPTmpList()->set_list(old_fp_tmp_list);
+}
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
+ // TODO(jbramley): Other architectures use the internal memcpy to copy the
+ // sequence. If this is a performance bottleneck, we should consider caching
+ // the sequence and copying it in the same way.
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitFrameSetupForCodeAgePatching(this);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
+ InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
+ ASSERT(jssp.Is(StackPointer()));
+ EmitCodeAgeSequence(this, stub);
+}
+
+
+#undef __
+#define __ assm->
+
+
+void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
+ Label start;
+ __ bind(&start);
+
+ // We can do this sequence using four instructions, but the code ageing
+ // sequence that patches it needs five, so we use the extra space to try to
+ // simplify some addressing modes and remove some dependencies (compared to
+ // using two stp instructions with write-back).
+ __ sub(jssp, jssp, 4 * kXRegSize);
+ __ sub(csp, csp, 4 * kXRegSize);
+ __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
+ __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
+ __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+}
+
+
+void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
+ Code * stub) {
+ Label start;
+ __ bind(&start);
+ // When the stub is called, the sequence is replaced with the young sequence
+ // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
+ // stub jumps to &start, stored in x0. The young sequence does not call the
+ // stub so there is no infinite loop here.
+ //
+ // A branch (br) is used rather than a call (blr) because this code replaces
+ // the frame setup code that would normally preserve lr.
+ __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ adr(x0, &start);
+ __ br(ip0);
+ // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
+ // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
+ if (stub) {
+ __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
+ __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+ }
+}
+
+
+bool MacroAssembler::IsYoungSequence(byte* sequence) {
+ // Generate a young sequence to compare with.
+ const int length = kCodeAgeSequenceSize / kInstructionSize;
+ static bool initialized = false;
+ static byte young[kCodeAgeSequenceSize];
+ if (!initialized) {
+ PatchingAssembler patcher(young, length);
+ // The young sequence is the frame setup code for FUNCTION code types. It is
+ // generated by FullCodeGenerator::Generate.
+ MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+ initialized = true;
+ }
+
+ bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
+ ASSERT(is_young || IsCodeAgeSequence(sequence));
+ return is_young;
+}
+
+
+#ifdef DEBUG
+bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
+ // The old sequence varies depending on the code age. However, the code up
+ // until kCodeAgeStubEntryOffset does not change, so we can check that part to
+ // get a reasonable level of verification.
+ const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+ static bool initialized = false;
+ static byte old[kCodeAgeStubEntryOffset];
+ if (!initialized) {
+ PatchingAssembler patcher(old, length);
+ MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
+ initialized = true;
+ }
+ return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+
+
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!AreAliased(result, dividend));
+ ASSERT(result.Is32Bits() && dividend.Is32Bits());
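+ // Compute dividend / divisor with the usual multiply-by-magic-number
+ // technique: take the high 32 bits of dividend * multiplier, correct when
+ // the signs of the multiplier and divisor differ, shift by ms.shift(), and
+ // finally add the dividend's sign bit so the quotient truncates towards
+ // zero.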
+ MultiplierAndShift ms(divisor);
+ Mov(result, ms.multiplier());
+ Smull(result.X(), dividend, result);
+ Asr(result.X(), result.X(), 32);
+ if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
+ if (ms.shift() > 0) Asr(result, result, ms.shift());
+ Add(result, result, Operand(dividend, LSR, 31));
+}
+
+
+#undef __
+
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ available_->set_list(old_available_);
+ availablefp_->set_list(old_availablefp_);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register::Create(code, reg.SizeInBits());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister::Create(code, reg.SizeInBits());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ ASSERT(!AreAliased(result, xzr, csp));
+ return result;
+}
+
+
+CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg) {
+ ASSERT(available->IncludesAliasOf(reg));
+ available->Remove(reg);
+ return reg;
+}
+
+
+#define __ masm->
+
+
+void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check) {
+ Assembler::BlockPoolsScope scope(masm);
+ if (reg.IsValid()) {
+ ASSERT(smi_check->is_bound());
+ ASSERT(reg.Is64Bits());
+
+ // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
+ // 'check' in the other bits. The possible offset is limited in that we
+ // use BitField to pack the data, and the underlying data type is a
+ // uint32_t.
+ uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+ __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
+ } else {
+ ASSERT(!smi_check->is_bound());
+
+ // An offset of 0 indicates that there is no patch site.
+ __ InlineData(0);
+ }
+}
+
+
+InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
+ : reg_(NoReg), smi_check_(NULL) {
+ InstructionSequence* inline_data = InstructionSequence::At(info);
+ ASSERT(inline_data->IsInlineData());
+ if (inline_data->IsInlineData()) {
+ uint64_t payload = inline_data->InlineData();
+ // We use BitField to decode the payload, and BitField can only handle
+ // 32-bit values.
+ ASSERT(is_uint32(payload));
+ if (payload != 0) {
+ int reg_code = RegisterBits::decode(payload);
+ reg_ = Register::XRegFromCode(reg_code);
+ uint64_t smi_check_delta = DeltaBits::decode(payload);
+ ASSERT(smi_check_delta != 0);
+ smi_check_ = inline_data->preceding(smi_check_delta);
+ }
+ }
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
new file mode 100644
index 000000000..1777c38e3
--- /dev/null
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -0,0 +1,2310 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+
+#include <vector>
+
+#include "v8globals.h"
+#include "globals.h"
+
+#include "arm64/assembler-arm64-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset);
+inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
+
+// Generate a MemOperand for loading a SMI from memory.
+inline MemOperand UntagSmiMemOperand(Register object, int offset);
+
+
+// ----------------------------------------------------------------------------
+// MacroAssembler
+
+enum BranchType {
+ // Copies of architectural conditions.
+ // The associated conditions can be used in place of those; the code will
+ // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+ // 'never' is used to not generate a branch (generally as the inverse
+ // branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
+inline BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ InvertCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+}
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
+enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
+enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+
+class MacroAssembler : public Assembler {
+ public:
+ MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+
+ inline Handle<Object> CodeObject();
+
+ // Instruction set functions ------------------------------------------------
+ // Logical macros.
+ inline void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ inline void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Cmn(const Register& rn, const Operand& operand);
+ inline void Cmp(const Register& rn, const Operand& operand);
+ inline void Neg(const Register& rd,
+ const Operand& operand);
+ inline void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ inline void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ inline void Ngc(const Register& rd,
+ const Operand& operand);
+ inline void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mov(const Register& rd, uint64_t imm);
+ inline void Mvn(const Register& rd, uint64_t imm);
+ void Mvn(const Register& rd, const Operand& operand);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+ static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+ // Conditional macros.
+ inline void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ inline void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+ // V8-specific load/store helpers.
+ void Load(const Register& rt, const MemOperand& addr, Representation r);
+ void Store(const Register& rt, const MemOperand& addr, Representation r);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ inline void Adr(const Register& rd, Label* label);
+ inline void Asr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+
+ // Branch type inversion relies on these relations.
+ STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
+
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+
+ inline void B(Label* label);
+ inline void B(Condition cond, Label* label);
+ void B(Label* label, Condition cond);
+ inline void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Bind(Label* label);
+ inline void Bl(Label* label);
+ inline void Blr(const Register& xn);
+ inline void Br(const Register& xn);
+ inline void Brk(int code);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ inline void Cinc(const Register& rd, const Register& rn, Condition cond);
+ inline void Cinv(const Register& rd, const Register& rn, Condition cond);
+ inline void Cls(const Register& rd, const Register& rn);
+ inline void Clz(const Register& rd, const Register& rn);
+ inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+ inline void CzeroX(const Register& rd, Condition cond);
+ inline void CmovX(const Register& rd, const Register& rn, Condition cond);
+ inline void Cset(const Register& rd, Condition cond);
+ inline void Csetm(const Register& rd, Condition cond);
+ inline void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+ inline void Dmb(BarrierDomain domain, BarrierType type);
+ inline void Dsb(BarrierDomain domain, BarrierType type);
+ inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
+ inline void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+ inline void Fabs(const FPRegister& fd, const FPRegister& fn);
+ inline void Fadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fccmp(const FPRegister& fn,
+ const FPRegister& fm,
+ StatusFlags nzcv,
+ Condition cond);
+ inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
+ inline void Fcmp(const FPRegister& fn, double value);
+ inline void Fcsel(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ Condition cond);
+ inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fcvtas(const Register& rd, const FPRegister& fn);
+ inline void Fcvtau(const Register& rd, const FPRegister& fn);
+ inline void Fcvtms(const Register& rd, const FPRegister& fn);
+ inline void Fcvtmu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtns(const Register& rd, const FPRegister& fn);
+ inline void Fcvtnu(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzs(const Register& rd, const FPRegister& fn);
+ inline void Fcvtzu(const Register& rd, const FPRegister& fn);
+ inline void Fdiv(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmax(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmaxnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmin(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fminnm(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fmov(FPRegister fd, FPRegister fn);
+ inline void Fmov(FPRegister fd, Register rn);
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Fmov(FPRegister fd, double imm);
+ inline void Fmov(FPRegister fd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(FPRegister fd, T imm) {
+ ASSERT(allow_macro_instructions_);
+ Fmov(fd, static_cast<double>(imm));
+ }
+ inline void Fmov(Register rd, FPRegister fn);
+ inline void Fmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fmul(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Fneg(const FPRegister& fd, const FPRegister& fn);
+ inline void Fnmadd(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Fnmsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm,
+ const FPRegister& fa);
+ inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintn(const FPRegister& fd, const FPRegister& fn);
+ inline void Frintz(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
+ inline void Fsub(const FPRegister& fd,
+ const FPRegister& fn,
+ const FPRegister& fm);
+ inline void Hint(SystemHint code);
+ inline void Hlt(int code);
+ inline void Isb();
+ inline void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src);
+ inline void Ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src);
+ // Provide both double and float interfaces for FP immediate loads, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of fd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ inline void Ldr(const FPRegister& ft, double imm);
+ inline void Ldr(const FPRegister& ft, float imm);
+ inline void Ldr(const Register& rt, uint64_t imm);
+ inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
+ inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
+ inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
+ inline void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
+ inline void Mov(const Register& rd, const Register& rm);
+ inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
+ inline void Mrs(const Register& rt, SystemRegister sysreg);
+ inline void Msr(SystemRegister sysreg, const Register& rt);
+ inline void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Mul(const Register& rd, const Register& rn, const Register& rm);
+ inline void Nop() { nop(); }
+ inline void Rbit(const Register& rd, const Register& rn);
+ inline void Ret(const Register& xn = lr);
+ inline void Rev(const Register& rd, const Register& rn);
+ inline void Rev16(const Register& rd, const Register& rn);
+ inline void Rev32(const Register& rd, const Register& rn);
+ inline void Ror(const Register& rd, const Register& rs, unsigned shift);
+ inline void Ror(const Register& rd, const Register& rn, const Register& rm);
+ inline void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Scvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Smull(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Smulh(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+ inline void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst);
+ inline void Sxtb(const Register& rd, const Register& rn);
+ inline void Sxth(const Register& rd, const Register& rn);
+ inline void Sxtw(const Register& rd, const Register& rn);
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ inline void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width);
+ inline void Ucvtf(const FPRegister& fd,
+ const Register& rn,
+ unsigned fbits = 0);
+ inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
+ inline void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+ inline void Uxtb(const Register& rd, const Register& rn);
+ inline void Uxth(const Register& rd, const Register& rn);
+ inline void Uxtw(const Register& rd, const Register& rn);
+
+ // Pseudo-instructions ------------------------------------------------------
+
+ // Compute rd = abs(rm).
+ // This function clobbers the condition flags.
+ //
+ // If rm is the minimum representable value, the result is not representable.
+ // Handlers for each case can be specified using the relevant labels.
+ void Abs(const Register& rd, const Register& rm,
+ Label * is_not_representable = NULL,
+ Label * is_representable = NULL);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is csp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (csp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3,
+ const CPURegister& src4, const CPURegister& src5 = NoReg,
+ const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
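+
+  // Illustrative usage sketch (assumes a MacroAssembler* named 'masm' and
+  // arbitrary X registers; ordering follows the rules described above):
+  //
+  //   masm->Push(x0, x1, x2);   // Same as Push(x0); Push(x1); Push(x2);
+  //   ...
+  //   masm->Pop(x2, x1, x0);    // Restores the pushed values.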
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
+ // kSRegSizeInBits are supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ inline void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSizeInBits);
+ }
+ inline void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSizeInBits);
+ }
+ inline void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+ inline void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(CPURegister src, Register count);
+ void PushMultipleTimes(CPURegister src, int count);
+
+ // This is a convenience method for pushing a single Handle<Object>.
+ inline void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+ // Aliases of Push and Pop, required for V8 compatibility.
+ inline void push(Register src) {
+ Push(src);
+ }
+ inline void pop(Register dst) {
+ Pop(dst);
+ }
+
+ // Sometimes callers need to push or pop multiple registers in a way that is
+ // difficult to structure efficiently for fixed Push or Pop calls. This scope
+ // allows push requests to be queued up, then flushed at once. The
+ // MacroAssembler will try to generate the most efficient sequence required.
+ //
+ // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
+ // register sizes and types.
+ class PushPopQueue {
+ public:
+ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
+
+ ~PushPopQueue() {
+ ASSERT(queued_.empty());
+ }
+
+ void Queue(const CPURegister& rt) {
+ size_ += rt.SizeInBytes();
+ queued_.push_back(rt);
+ }
+
+ void PushQueued();
+ void PopQueued();
+
+ private:
+ MacroAssembler* masm_;
+ int size_;
+ std::vector<CPURegister> queued_;
+ };
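+
+  // Illustrative sketch of using the queue (assumes a MacroAssembler* named
+  // 'masm'; the queued registers are arbitrary):
+  //
+  //   PushPopQueue queue(masm);
+  //   queue.Queue(x0);
+  //   queue.Queue(d0);        // Mixed register sizes and types are allowed.
+  //   queue.PushQueued();     // Emits a single efficient push sequence.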
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Poke(const CPURegister& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void Peek(const CPURegister& dst, const Operand& offset);
+
+ // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
+ // with 'src2' at a higher address than 'src1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
+
+ // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
+ // values peeked will be adjacent, with the value in 'dst2' being from a
+ // higher address than 'dst1'. The offset is in bytes.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then
+ // csp must be aligned to 16 bytes.
+ void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
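+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm' and that the
+  // addressed stack slots have already been reserved, e.g. with Claim below):
+  //
+  //   masm->PokePair(x0, x1, 0);   // Store x0 at offset 0 and x1 at 8.
+  //   ...
+  //   masm->PeekPair(x0, x1, 0);   // Reload both values.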
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // In debug mode, both of these will write invalid data into the claimed or
+ // dropped space.
+ //
+ // If the current stack pointer (according to StackPointer()) is csp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ //
+ // Note that unit_size must be specified in bytes. For variants which take a
+ // Register count, the unit size must be a power of two.
+ inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(const Register& count,
+ uint64_t unit_size = kXRegSize);
+ inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(const Register& count,
+ uint64_t unit_size = kXRegSize);
+
+ // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
+ // register.
+ inline void ClaimBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
+ inline void DropBySMI(const Register& count_smi,
+ uint64_t unit_size = kXRegSize);
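+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm'; the counts
+  // are arbitrary):
+  //
+  //   masm->Claim(3);                  // Reserve 3 * kXRegSize bytes.
+  //   masm->Poke(x0, 2 * kXRegSize);   // Use one of the reserved slots.
+  //   ...
+  //   masm->Drop(3);                   // Release the space again.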
+
+ // Compare a register with an operand, and branch to label depending on the
+ // condition. May corrupt the status flags.
+ inline void CompareAndBranch(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ANY of
+ // those bits are set. May corrupt the status flags.
+ inline void TestAndBranchIfAnySet(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
+
+ // Test the bits of register defined by bit_pattern, and branch if ALL of
+  // those bits are clear (i.e. not set). May corrupt the status flags.
+ inline void TestAndBranchIfAllClear(const Register& reg,
+ const uint64_t bit_pattern,
+ Label* label);
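+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm' and
+  // caller-defined labels; the immediate values are arbitrary):
+  //
+  //   masm->CompareAndBranch(x0, Operand(42), eq, &is_42);
+  //   masm->TestAndBranchIfAnySet(x1, 0x3, &low_bits_set);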
+
+ // Insert one or more instructions into the instruction stream that encode
+ // some caller-defined data. The instructions used will be executable with no
+ // side effects.
+ inline void InlineData(uint64_t data);
+
+ // Insert an instrumentation enable marker into the instruction stream.
+ inline void EnableInstrumentation();
+
+ // Insert an instrumentation disable marker into the instruction stream.
+ inline void DisableInstrumentation();
+
+ // Insert an instrumentation event marker into the instruction stream. These
+ // will be picked up by the instrumentation system to annotate an instruction
+ // profile. The argument marker_name must be a printable two character string;
+ // it will be encoded in the event marker.
+ inline void AnnotateInstrumentation(const char* marker_name);
+
+ // If emit_debug_code() is true, emit a run-time check to ensure that
+ // StackPointer() does not point below the system stack pointer.
+ //
+ // Whilst it is architecturally legal for StackPointer() to point below csp,
+ // it can be evidence of a potential bug because the ABI forbids accesses
+ // below csp.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ //
+ // If StackPointer() is the system stack pointer, this emits no code.
+ void AssertStackConsistency();
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // Note that registers are not checked for invalid values. Use this method
+ // only if you know that the GC won't try to examine the values on the stack.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless the current stack pointer (as set by
+ // SetStackPointer) is the system stack pointer (csp), and is aligned to
+ // ActivationFrameAlignment().
+ void PopCalleeSavedRegisters();
+
+ // Set the current stack pointer, but don't generate any code.
+ inline void SetStackPointer(const Register& stack_pointer) {
+ ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ inline const Register& StackPointer() const {
+ return sp_;
+ }
+
+ // Align csp for a frame, as per ActivationFrameAlignment, and make it the
+ // current stack pointer.
+ inline void AlignAndSetCSPForFrame() {
+ int sp_alignment = ActivationFrameAlignment();
+ // AAPCS64 mandates at least 16-byte alignment.
+ ASSERT(sp_alignment >= 16);
+ ASSERT(IsPowerOf2(sp_alignment));
+ Bic(csp, StackPointer(), sp_alignment - 1);
+ SetStackPointer(csp);
+ }
+
+ // Push the system stack pointer (csp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not csp, since the call does
+ // not make sense in that context.
+ inline void BumpSystemStackPointer(const Operand& space);
+
+ // Helpers ------------------------------------------------------------------
+ // Root register.
+ inline void InitializeRootRegister();
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination,
+ Heap::RootListIndex index);
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+
+ // Load both TrueValue and FalseValue roots.
+ void LoadTrueFalseRoots(Register true_root, Register false_root);
+
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ AllowDeferredHandleDereference heap_object_check;
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ ASSERT(object->IsSmi());
+ Mov(result, Operand(object));
+ }
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code);
+
+  // This is required for compatibility with architecture-independent code.
+ // Remove if not needed.
+ inline void Move(Register dst, Register src) { Mov(dst, src); }
+
+ void LoadInstanceDescriptors(Register map,
+ Register descriptors);
+ void EnumLengthUntagged(Register dst, Register map);
+ void EnumLengthSmi(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
+
+ template<typename Field>
+ void DecodeField(Register reg) {
+ static const uint64_t shift = Field::kShift + kSmiShift;
+ static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+ Ubfx(reg, reg, shift, setbits);
+ }
+
+ // ---- SMI and Number Utilities ----
+
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+ inline void SmiUntag(Register dst, Register src);
+ inline void SmiUntag(Register smi);
+ inline void SmiUntagToDouble(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+ inline void SmiUntagToFloat(FPRegister dst,
+ Register src,
+ UntagMode mode = kNotSpeculativeUntag);
+
+ // Compute the absolute value of 'smi' and leave the result in 'smi'
+ // register. If 'smi' is the most negative SMI, the absolute value cannot
+ // be represented as a SMI and a jump to 'slow' is done.
+ void SmiAbs(const Register& smi, Label* slow);
+
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label);
+ inline void JumpIfBothSmi(Register value1,
+ Register value2,
+ Label* both_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherSmi(Register value1,
+ Register value2,
+ Label* either_smi_label,
+ Label* not_smi_label = NULL);
+ inline void JumpIfEitherNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
+ inline void JumpIfBothNotSmi(Register value1,
+ Register value2,
+ Label* not_smi_label);
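+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm',
+  // caller-defined labels, and tagged values in x0 and x1):
+  //
+  //   masm->JumpIfSmi(x0, &is_smi);
+  //   masm->JumpIfEitherSmi(x0, x1, &either_is_smi);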
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
+ void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
+
+ // Abort execution if argument is not a name, enabled via --debug-code.
+ void AssertName(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
+
+ void JumpForHeapNumber(Register object,
+ Register heap_number_map,
+ Label* on_heap_number,
+ Label* on_not_heap_number = NULL);
+ void JumpIfHeapNumber(Register object,
+ Label* on_heap_number,
+ Register heap_number_map = NoReg);
+ void JumpIfNotHeapNumber(Register object,
+ Label* on_not_heap_number,
+ Register heap_number_map = NoReg);
+
+ // Sets the vs flag if the input is -0.0.
+ void TestForMinusZero(DoubleRegister input);
+
+ // Jump to label if the input double register contains -0.0.
+ void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the object register is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the not_found label; only the content of the object
+  // register is guaranteed to be unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
+ // output.
+ void ClampInt32ToUint8(Register in_out);
+ void ClampInt32ToUint8(Register output, Register input);
+
+ // Saturate a double in input to an unsigned 8-bit integer in output.
+ void ClampDoubleToUint8(Register output,
+ DoubleRegister input,
+ DoubleRegister dbl_scratch);
+
+ // Try to convert a double to a signed 32-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt32(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is32Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // Try to convert a double to a signed 64-bit int.
+ // This succeeds if the result compares equal to the input, so inputs of -0.0
+ // are converted to 0 and handled as a success.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt64(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL) {
+ ASSERT(as_int.Is64Bits());
+ TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
+ on_failed_conversion);
+ }
+
+ // ---- Object Utilities ----
+
+ // Copy fields from 'src' to 'dst', where both are tagged objects.
+ // The 'temps' list is a list of X registers which can be used for scratch
+ // values. The temps list must include at least one register.
+ //
+ // Currently, CopyFields cannot make use of more than three registers from
+ // the 'temps' list.
+ //
+ // CopyFields expects to be able to take at least two registers from
+ // MacroAssembler::TmpList().
+ void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
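+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm', tagged
+  // object pointers in x0 and x1, and an arbitrary field count; the scratch
+  // list constructor is shown only as an example):
+  //
+  //   masm->CopyFields(x0, x1, CPURegList(x10, x11, x12), 5);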
+
+ // Starting at address in dst, initialize field_count 64-bit fields with
+ // 64-bit value in register filler. Register dst is corrupted.
+ void FillFields(Register dst,
+ Register field_count,
+ Register filler);
+
+ // Copies a number of bytes from src to dst. All passed registers are
+ // clobbered. On exit src and dst will point to the place just after where the
+ // last byte was read or written and length will be zero. Hint may be used to
+ // determine which is the most efficient algorithm to use for copying.
+ void CopyBytes(Register dst,
+ Register src,
+ Register length,
+ Register scratch,
+ CopyHint hint = kCopyUnknown);
+
+ // ---- String Utilities ----
+
+
+ // Jump to label if either object is not a sequential ASCII string.
+ // Optionally perform a smi check on the objects first.
+ void JumpIfEitherIsNotSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure,
+ SmiCheckType smi_check = DO_SMI_CHECK);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfEitherInstanceTypeIsNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+
+ // ---- Calling / Jumping helpers ----
+
+  // This is required for compatibility with architecture-independent code.
+ inline void jmp(Label* L) { B(L); }
+
+ // Passes thrown value to the handler of top of the try handler chain.
+ // Register value must be x0.
+ void Throw(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain. Register value must be x0.
+ void ThrowUncatchable(Register value,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
+ // Throw a message string as an exception if the value is a smi.
+ void ThrowIfSmi(const Register& value, BailoutReason reason);
+
+ void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
+ void TailCallStub(CodeStub* stub);
+
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
+
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
+
+ void TailCallRuntime(Runtime::FunctionId fid,
+ int num_arguments,
+ int result_size);
+
+ int ActivationFrameAlignment();
+
+ // Calls a C function.
+ // The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
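+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm' and an
+  // ExternalReference 'ref' to a C function taking two double arguments):
+  //
+  //   masm->CallCFunction(ref, 0, 2);   // No integer args, two double args.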
+
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions.
+ // 'stack_space' is the space to be unwound on exit (includes the call JS
+ // arguments space and the additional space allocated for the fast call).
+ // 'spill_offset' is the offset from the stack pointer where
+ // CallApiFunctionAndReturn can spill registers.
+ void CallApiFunctionAndReturn(Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ int spill_offset,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
+
+  // The number of registers that CallApiFunctionAndReturn will need to save
+  // on the stack. The space for these registers needs to be allocated in the
+  // ExitFrame before calling CallApiFunctionAndReturn.
+ static const int kCallApiFunctionSpillSpace = 4;
+
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& builtin);
+ // Tail call of a runtime routine (jump).
+ // Like JumpToExternalReference, but also takes care of passing the number
+ // of parameters.
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
+
+ // Store the code object for the given builtin in the target register and
+ // setup the function in the function register.
+ void GetBuiltinEntry(Register target,
+ Register function,
+ Builtins::JavaScript id);
+
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+ void Jump(Register target);
+ void Jump(Address target, RelocInfo::Mode rmode);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode);
+ void Jump(intptr_t target, RelocInfo::Mode rmode);
+
+ void Call(Register target);
+ void Call(Label* target);
+ void Call(Address target, RelocInfo::Mode rmode);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // For every Call variant, there is a matching CallSize function that returns
+ // the size (in bytes) of the call sequence.
+ static int CallSize(Register target);
+ static int CallSize(Label* target);
+ static int CallSize(Address target, RelocInfo::Mode rmode);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
+
+ // Registers used through the invocation chain are hard-coded.
+ // We force passing the parameters to ensure the contracts are correctly
+ // honoured by the caller.
+ // 'function' must be x1.
+ // 'actual' must use an immediate or x0.
+ // 'expected' must use an immediate or x2.
+ // 'call_kind' must be x5.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag,
+ bool* definitely_mismatches,
+ const CallWrapper& call_wrapper);
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+ void InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+
+ // ---- Floating point helpers ----
+
+  // Perform a conversion from a double to a signed int64. If the input fits in
+  // the range of the 64-bit result, execution branches to done. Otherwise,
+ // execution falls through, and the sign of the result can be used to
+ // determine if overflow was towards positive or negative infinity.
+ //
+ // On successful conversion, the least significant 32 bits of the result are
+ // equivalent to the ECMA-262 operation "ToInt32".
+ //
+ // Only public for the test code in test-code-stubs-arm64.cc.
+ void TryConvertDoubleToInt64(Register result,
+ DoubleRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Label* not_int32);
+
+ // ---- Code generation helpers ----
+
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() const { return generating_stub_; }
+#if DEBUG
+ void set_allow_macro_instructions(bool value) {
+ allow_macro_instructions_ = value;
+ }
+ bool allow_macro_instructions() const { return allow_macro_instructions_; }
+#endif
+ bool use_real_aborts() const { return use_real_aborts_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() const { return has_frame_; }
+ bool AllowThisStubCall(CodeStub* stub);
+
+ class NoUseRealAbortsScope {
+ public:
+ explicit NoUseRealAbortsScope(MacroAssembler* masm) :
+ saved_(masm->use_real_aborts_), masm_(masm) {
+ masm_->use_real_aborts_ = false;
+ }
+ ~NoUseRealAbortsScope() {
+ masm_->use_real_aborts_ = saved_;
+ }
+ private:
+ bool saved_;
+ MacroAssembler* masm_;
+ };
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void DebugBreak();
+#endif
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. The allocated object is returned in result.
+ //
+  // If the new space is exhausted, control continues at the gc_required
+  // label. In this case, the result and scratch registers may still be
+  // clobbered. If flags includes TAG_OBJECT, the result is tagged as a heap
+  // object.
+ void Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed.
+ // All registers are clobbered.
+ // If no heap_number_map register is provided, the function will take care of
+ // loading it.
+ void AllocateHeapNumber(Register result,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+ void AllocateHeapNumberWithValue(Register result,
+ DoubleRegister value,
+ Label* gc_required,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map = NoReg);
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+  // Try to get the function prototype of a function and put the value in the
+  // result register. Check that the function really is a function and jump
+ // to the miss label if the fast checks fail. The function register will be
+ // untouched; the other registers may be clobbered.
+ enum BoundFunctionAction {
+ kMissOnBoundFunction,
+ kDontMissOnBoundFunction
+ };
+
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss,
+ BoundFunctionAction action =
+ kDontMissOnBoundFunction);
+
+ // Compare object type for heap object. heap_object contains a non-Smi
+ // whose object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ // It leaves the map in the map register (unless the type_reg and map register
+ // are the same register). It leaves the heap object in the heap_object
+ // register unless the heap_object register is the same register as one of the
+ // other registers.
+ void CompareObjectType(Register heap_object,
+ Register map,
+ Register type_reg,
+ InstanceType type);
+
+
+ // Compare object type for heap object, and branch if equal (or not.)
+ // heap_object contains a non-Smi whose object type should be compared with
+ // the given type. This both sets the flags and leaves the object type in
+ // the type_reg register. It leaves the map in the map register (unless the
+ // type_reg and map register are the same register). It leaves the heap
+ // object in the heap_object register unless the heap_object register is the
+ // same register as one of the other registers.
+ void JumpIfObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_cond_pass,
+ Condition cond = eq);
+
+ void JumpIfNotObjectType(Register object,
+ Register map,
+ Register type_reg,
+ InstanceType type,
+ Label* if_not_object);
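+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm', a
+  // caller-defined 'miss' label and scratch registers x10 and x11):
+  //
+  //   masm->JumpIfNotObjectType(x0, x10, x11, JS_FUNCTION_TYPE, &miss);
+  //   // Falls through with the map in x10 and the instance type in x11.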
+
+ // Compare instance type in a map. map contains a valid map object whose
+ // object type should be compared with the given type. This both
+ // sets the flags and leaves the object type in the type_reg register.
+ void CompareInstanceType(Register map,
+ Register type_reg,
+ InstanceType type);
+
+ // Compare an object's map with the specified map. Condition flags are set
+ // with result of map compare.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map);
+
+ // As above, but the map of the object is already loaded into the register
+ // which is preserved by the code generated.
+ void CompareMap(Register obj_map,
+ Handle<Map> map);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // As above, but the map of the object is already loaded into obj_map, and is
+ // preserved.
+ void CheckMap(Register obj_map,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+
+ // Test the bitfield of the heap object map with mask and set the condition
+ // flags. The object register is preserved.
+ void TestMapBitfield(Register object, uint64_t mask);
+
+ // Load the elements kind field from a map, and return it in the result
+ // register.
+ void LoadElementsKindFromMap(Register result, Register map);
+
+ // Compare the object in a register to a value from the root list.
+ void CompareRoot(const Register& obj, Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_equal);
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(const Register& obj,
+ Heap::RootListIndex index,
+ Label* if_not_equal);
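+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm' and a
+  // caller-defined label):
+  //
+  //   masm->JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);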
+
+ // Load and check the instance type of an object for being a unique name.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Falls through if the object is a name and jumps to fail otherwise.
+ inline void IsObjectNameType(Register object, Register type, Label* fail);
+
+ inline void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+  // Check the instance type in the given map to see if it corresponds to a
+  // JS object type. Jump to the fail label if this is not the case and fall
+  // through otherwise. However, if the fail label is NULL, no branch is
+  // emitted and only the condition flags are updated; testing them with the
+  // "le" condition then indicates a valid JS object type.
+ inline void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // The object and type arguments can be the same register; in that case it
+ // will be overwritten with the type.
+  // Jumps to not_string or string as appropriate. If the appropriate label is
+ // NULL, fall through.
+ inline void IsObjectJSStringType(Register object, Register type,
+ Label* not_string, Label* string = NULL);
+
+ // Compare the contents of a register with an operand, and branch to true,
+ // false or fall through, depending on condition.
+ void CompareAndSplit(const Register& lhs,
+ const Operand& rhs,
+ Condition cond,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Test the bits of register defined by bit_pattern, and branch to
+ // if_any_set, if_all_clear or fall_through accordingly.
+ void TestAndSplit(const Register& reg,
+ uint64_t bit_pattern,
+ Label* if_all_clear,
+ Label* if_any_set,
+ Label* fall_through);
+
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map, Register scratch, Label* fail);
+
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+ // Check to see if number can be stored as a double in FastDoubleElements.
+ // If it can, store it at the index specified by key_reg in the array,
+ // otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ FPRegister fpscratch1,
+ FPRegister fpscratch2,
+ Label* fail,
+ int elements_offset = 0);
+
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ SeqStringSetCharCheckIndexType index_type,
+ Register scratch,
+ uint32_t encoding_mask);
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss);
+
+  // Hash the integer value in the 'key' register.
+ // It uses the same algorithm as ComputeIntegerHash in utils.h.
+ void GetNumberHash(Register key, Register scratch);
+
+ // Load value from the dictionary.
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ // ---------------------------------------------------------------------------
+ // Frames.
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Returns map with validated enum cache in object register.
+ void CheckEnumCache(Register object,
+ Register null_value,
+ Register scratch0,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* call_runtime);
+
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver should point to the array object.
+ // If allocation info is present, the Z flag is set (so that the eq
+ // condition will pass).
+ void TestJSArrayForAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
+ &no_memento_found);
+ B(eq, memento_found);
+ Bind(&no_memento_found);
+ }
+
+ // The stack pointer has to switch between csp and jssp when setting up and
+ // destroying the exit frame. Hence preserving/restoring the registers is
+ // slightly more complicated than simple push/pop operations.
+ void ExitFramePreserveFPRegs();
+ void ExitFrameRestoreFPRegs();
+
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
+ // Enter exit frame. Exit frames are used when calling C code from generated
+ // (JavaScript) code.
+ //
+ // The stack pointer must be jssp on entry, and will be set to csp by this
+ // function. The frame pointer is also configured, but the only other
+ // registers modified by this function are the provided scratch register, and
+ // jssp.
+ //
+ // The 'extra_space' argument can be used to allocate some space in the exit
+ // frame that will be ignored by the GC. This space will be reserved in the
+ // bottom of the frame immediately above the return address slot.
+ //
+ // Set up a stack frame and registers as follows:
+ // fp[8]: CallerPC (lr)
+ // fp -> fp[0]: CallerFP (old fp)
+ // fp[-8]: SPOffset (new csp)
+ // fp[-16]: CodeObject()
+ // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
+ // csp[8]: Memory reserved for the caller if extra_space != 0.
+ // Alignment padding, if necessary.
+ // csp -> csp[0]: Space reserved for the return address.
+ //
+ // This function also stores the new frame information in the top frame, so
+ // that the new frame becomes the current frame.
+ void EnterExitFrame(bool save_doubles,
+ const Register& scratch,
+ int extra_space = 0);
+
+ // Leave the current exit frame, after a C function has returned to generated
+ // (JavaScript) code.
+ //
+ // This effectively unwinds the operation of EnterExitFrame:
+ // * Preserved doubles are restored (if restore_doubles is true).
+ // * The frame information is removed from the top frame.
+ // * The exit frame is dropped.
+ // * The stack pointer is reset to jssp.
+ //
+ // The stack pointer must be csp on entry.
+ void LeaveExitFrame(bool save_doubles,
+ const Register& scratch,
+ bool restore_context);
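+
+  // Illustrative pairing (assumes a MacroAssembler* named 'masm', jssp as the
+  // stack pointer on entry, and x10 as a scratch register):
+  //
+  //   masm->EnterExitFrame(false, x10);
+  //   ...                                // e.g. a CallCFunction sequence.
+  //   masm->LeaveExitFrame(false, x10, true);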
+
+ void LoadContext(Register dst, int context_chain_length);
+
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // ---------------------------------------------------------------------------
+ // Garbage collector support (GC).
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch1,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+
+ // Store value in register src in the safepoint stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst) {
+ Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
+
+ void CheckPageFlagSet(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_any_set);
+
+ void CheckPageFlagClear(const Register& object,
+ const Register& scratch,
+ int mask,
+ Label* if_all_clear);
+
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
+ // Check if object is in new space and jump accordingly.
+ // Register 'object' is preserved.
+ void JumpIfNotInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, ne, branch);
+ }
+
+ void JumpIfInNewSpace(Register object,
+ Label* branch) {
+ InNewSpace(object, eq, branch);
+ }
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
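+
+  // Illustrative sketch of a field store followed by its write barrier
+  // (assumes 'masm' is a MacroAssembler*, x0 holds a tagged object, x1 the
+  // value, x10 is a scratch register and 'offset' is the untagged offset):
+  //
+  //   masm->Str(x1, FieldMemOperand(x0, offset));
+  //   masm->RecordWriteField(x0, offset, x1, x10, kLRHasNotBeenSaved,
+  //                          kDontSaveFPRegs);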
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Helper for finding the mark bits for an address.
+ // Note that the behaviour slightly differs from other architectures.
+ // On exit:
+ // - addr_reg is unchanged.
+ // - The bitmap register points at the word with the mark bits.
+ // - The shift register contains the index of the first color bit for this
+ // object in the bitmap.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register shift_reg);
+
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
+
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging.
+
+ // Calls Abort(msg) if the condition cond is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cond, BailoutReason reason);
+ void AssertRegisterIsClear(Register reg, BailoutReason reason);
+ void AssertRegisterIsRoot(
+ Register reg,
+ Heap::RootListIndex index,
+ BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
+ void AssertFastElements(Register elements);
+
+ // Abort if the specified register contains the invalid color bit pattern.
+ // The pattern must be in bits [1:0] of 'reg' register.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertHasValidColor(const Register& reg);
+
+ // Abort if 'object' register doesn't point to a string object.
+ //
+ // If emit_debug_code() is false, this emits no code.
+ void AssertIsString(const Register& object);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cond, BailoutReason reason);
+ void CheckRegisterIsClear(Register reg, BailoutReason reason);
+
+ // Print a message to stderr and abort execution.
+ void Abort(BailoutReason reason);
+
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the native context if the map in register
+ // map_in_out is the cached Array map in the native context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch1,
+ Register scratch2,
+ Label* no_map_match);
+
+ void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers function and
+  // map can be the same; function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ // Like printf, but print at run-time from generated code.
+ //
+ // The caller must ensure that arguments for floating-point placeholders
+ // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
+ // placeholders are Registers.
+ //
+ // A maximum of four arguments may be given to any single Printf call. The
+ // arguments must be of the same type, but they do not need to have the same
+ // size.
+ //
+ // The following registers cannot be printed:
+ // StackPointer(), csp.
+ //
+ // This function automatically preserves caller-saved registers so that
+ // calling code can use Printf at any point without having to worry about
+ // corruption. The preservation mechanism generates a lot of code. If this is
+ // a problem, preserve the important registers manually and then call
+ // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
+ // implicitly preserved.
+ //
+ // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
+ // preserved, and can be printed. This allows Printf to be used during debug
+ // code.
+ //
+ // This function assumes (and asserts) that the current stack pointer is
+ // callee-saved, not caller-saved. This is most likely the case anyway, as a
+ // caller-saved stack pointer doesn't make a lot of sense.
+  void Printf(const char* format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
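+
+  // Illustrative sketch (assumes a MacroAssembler* named 'masm'; the format
+  // strings and registers are arbitrary, and all arguments of a single call
+  // must share a type as described above):
+  //
+  //   masm->Printf("count: %ld\n", x0);
+  //   masm->Printf("value: %g\n", d0);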
+
+ // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
+ //
+ // The return code from the system printf call will be returned in x0.
+  void PrintfNoPreserve(const char* format,
+ const CPURegister& arg0 = NoCPUReg,
+ const CPURegister& arg1 = NoCPUReg,
+ const CPURegister& arg2 = NoCPUReg,
+ const CPURegister& arg3 = NoCPUReg);
+
+ // Code ageing support functions.
+
+  // Code ageing on ARM64 works as it does on ARM. When V8 wants to mark a
+ // function as old, it replaces some of the function prologue (generated by
+ // FullCodeGenerator::Generate) with a call to a special stub (ultimately
+ // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
+ // function prologue to its initial young state (indicating that it has been
+ // recently run) and continues. A young function is therefore one which has a
+ // normal frame setup sequence, and an old function has a code age sequence
+ // which calls a code ageing stub.
+
+ // Set up a basic stack frame for young code (or code exempt from ageing) with
+ // type FUNCTION. It may be patched later for code ageing support. This is
+  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
+ //
+ // This function takes an Assembler so it can be called from either a
+ // MacroAssembler or a PatchingAssembler context.
+ static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
+
+ // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
+ void EmitFrameSetupForCodeAgePatching();
+
+ // Emit a code age sequence that calls the relevant code age stub. The code
+ // generated by this sequence is expected to replace the code generated by
+ // EmitFrameSetupForCodeAgePatching, and represents an old function.
+ //
+ // If stub is NULL, this function generates the code age sequence but omits
+ // the stub address that is normally embedded in the instruction stream. This
+ // can be used by debug code to verify code age sequences.
+ static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
+
+ // Call EmitCodeAgeSequence from a MacroAssembler context.
+ void EmitCodeAgeSequence(Code* stub);
+
+  // Return true if the sequence is a young sequence generated by
+ // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
+ // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
+ static bool IsYoungSequence(byte* sequence);
+
+#ifdef DEBUG
+ // Return true if the sequence is a code age sequence generated by
+ // EmitCodeAgeSequence.
+ static bool IsCodeAgeSequence(byte* sequence);
+#endif
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
+
+ private:
+ // Helpers for CopyFields.
+ // These each implement CopyFields in a different way.
+ void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4,
+ Register scratch5);
+ void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4);
+ void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
+ Register scratch1, Register scratch2,
+ Register scratch3);
+
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+ // block of registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is specified in bytes.
+ void PrepareForPush(Operand total_size);
+ void PrepareForPop(Operand total_size);
+
+ void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
+ void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
+
+ // Call Printf. On a native build, a simple call will be generated, but if the
+ // simulator is being used then a suitable pseudo-instruction is used. The
+ // arguments and stack (csp) must be prepared by the caller as for a normal
+ // AAPCS64 call to 'printf'.
+ //
+ // The 'type' argument specifies the type of the optional arguments.
+ void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
+
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry(Register exception,
+ Register object,
+ Register state,
+ Register scratch1,
+ Register scratch2);
+
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+ // Try to convert a double to an int so that integer fast-paths may be
+ // used. Not every valid integer value is guaranteed to be caught.
+ // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
+ // is a W or X register.
+ //
+ // This does not distinguish between +0 and -0, so if this distinction is
+ // important it must be checked separately.
+ //
+ // On output the Z flag is set if the conversion was successful.
+ void TryConvertDoubleToInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion = NULL,
+ Label* on_failed_conversion = NULL);
+
+ bool generating_stub_;
+#if DEBUG
+ // Tell whether any of the macro instructions can be used. When false, the
+ // MacroAssembler will assert if a method which can emit a variable number
+ // of instructions is called.
+ bool allow_macro_instructions_;
+#endif
+ bool has_frame_;
+
+ // The Abort method should call a V8 runtime function, but the CallRuntime
+ // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
+ // use a simpler abort mechanism that doesn't depend on CEntryStub.
+ //
+ // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
+ // being generated.
+ bool use_real_aborts_;
+
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ public:
+ // Far branch resolution.
+ //
+ // The various classes of branch instructions with immediate offsets have
+ // different ranges. While the Assembler will fail to assemble a branch
+ // exceeding its range, the MacroAssembler offers a mechanism to resolve
+ // branches to too distant targets, either by tweaking the generated code to
+ // use branch instructions with wider ranges or generating veneers.
+ //
+ // Currently branches to distant targets are resolved using unconditional
+ // branch instructions with a range of +-128MB. If that becomes too little
+ // (!), the mechanism can be extended to generate special veneers for really
+ // far targets.
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound, it returns true if the label (or the previous link
+ // in the label chain) is out of range. In that case the caller is responsible
+ // for generating appropriate code.
+ // Otherwise it returns false.
+ // This function also checks whether veneers need to be emitted.
+ bool NeedExtraInstructionsOrRegisterBranch(Label *label,
+ ImmBranchType branch_type);
+};
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts that the number of
+// instructions emitted is what you specified when creating the scope.
+class InstructionAccurateScope BASE_EMBEDDED {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+ : masm_(masm)
+#ifdef DEBUG
+ ,
+ size_(count * kInstructionSize)
+#endif
+ {
+ // Before blocking the const pool, see if it needs to be emitted.
+ masm_->CheckConstPool(false, true);
+ masm_->CheckVeneerPool(false, true);
+
+ masm_->StartBlockPools();
+#ifdef DEBUG
+ if (count != 0) {
+ masm_->bind(&start_);
+ }
+ previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+ masm_->set_allow_macro_instructions(false);
+#endif
+ }
+
+ ~InstructionAccurateScope() {
+ masm_->EndBlockPools();
+#ifdef DEBUG
+ if (start_.is_bound()) {
+ ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+ }
+ masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+#endif
+ }
+
+ private:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ size_t size_;
+ Label start_;
+ bool previous_allow_macro_instructions_;
+#endif
+};
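+
+// Illustrative usage (a sketch; the emitted instructions are placeholders):
+// emit exactly two raw instructions while macro expansion and pool emission
+// are blocked by the scope.
+//
+//   {
+//     InstructionAccurateScope scope(masm, 2);
+//     __ nop();
+//     __ ret();
+//   }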
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(MacroAssembler* masm)
+ : available_(masm->TmpList()),
+ availablefp_(masm->FPTmpList()),
+ old_available_(available_->list()),
+ old_availablefp_(availablefp_->list()) {
+ ASSERT(available_->type() == CPURegister::kRegister);
+ ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+ }
+
+ ~UseScratchRegisterScope();
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+
+ Register UnsafeAcquire(const Register& reg) {
+ return Register(UnsafeAcquire(available_, reg));
+ }
+
+ Register AcquireSameSizeAs(const Register& reg);
+ FPRegister AcquireSameSizeAs(const FPRegister& reg);
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+ static CPURegister UnsafeAcquire(CPURegList* available,
+ const CPURegister& reg);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kFPRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kFPRegister
+};
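+
+// Illustrative usage (a sketch only): acquire a temporary X register for the
+// duration of the scope; it is returned to the MacroAssembler's TmpList()
+// automatically when 'temps' goes out of scope.
+//
+//   {
+//     UseScratchRegisterScope temps(masm);
+//     Register scratch = temps.AcquireX();
+//     __ Mov(scratch, 0x1234);
+//   }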
+
+
+inline MemOperand ContextMemOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+inline MemOperand GlobalObjectMemOperand() {
+ return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Encode and decode information about patchable inline SMI checks.
+class InlineSmiCheckInfo {
+ public:
+ explicit InlineSmiCheckInfo(Address info);
+
+ bool HasSmiCheck() const {
+ return smi_check_ != NULL;
+ }
+
+ const Register& SmiRegister() const {
+ return reg_;
+ }
+
+ Instruction* SmiCheck() const {
+ return smi_check_;
+ }
+
+ // Use MacroAssembler::InlineData to emit information about patchable inline
+ // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
+ // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
+ //
+ // The generated patch information can be read using the InlineSmiCheckInfo
+ // class.
+ static void Emit(MacroAssembler* masm, const Register& reg,
+ const Label* smi_check);
+
+ // Emit information to indicate that there is no inline SMI check.
+ static void EmitNotInlined(MacroAssembler* masm) {
+ Label unbound;
+ Emit(masm, NoReg, &unbound);
+ }
+
+ private:
+ Register reg_;
+ Instruction* smi_check_;
+
+ // Fields in the data encoded by InlineData.
+
+ // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
+ // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
+ // used in a patchable check. The Emit() method checks this.
+ //
+ // Note that the total size of the fields is restricted by the underlying
+ // storage size handled by the BitField class, which is a uint32_t.
+ class RegisterBits : public BitField<unsigned, 0, 5> {};
+ class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
+};
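+
+// Illustrative round trip (a sketch; 'smi_check' and 'info_address' are
+// hypothetical names): emit the patch information while generating code, then
+// decode it later from the address recorded by InlineData.
+//
+//   Label smi_check;
+//   InlineSmiCheckInfo::Emit(masm, x1, &smi_check);
+//   // ... at patch time ...
+//   InlineSmiCheckInfo info(info_address);
+//   if (info.HasSmiCheck()) { /* patch info.SmiCheck() in place */ }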
+
+} } // namespace v8::internal
+
+#ifdef GENERATED_CODE_COVERAGE
+#error "Unsupported option"
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
new file mode 100644
index 000000000..536580ab5
--- /dev/null
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
@@ -0,0 +1,1728 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "cpu-profiler.h"
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention:
+ * - w19 : Used to temporarily store a value before a call to C code.
+ * See CheckNotBackReferenceIgnoreCase.
+ * - x20 : Pointer to the current code object (Code*),
+ * it includes the heap object tag.
+ * - w21 : Current position in input, as negative offset from
+ * the end of the string. Please notice that this is
+ * the byte offset, not the character offset!
+ * - w22 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - x23 : Points to tip of backtrack stack.
+ * - w24 : Position of the first character minus one: non_position_value.
+ * Used to initialize capture registers.
+ * - x25 : Address at the end of the input string: input_end.
+ * Points to byte after last character in input.
+ * - x26 : Address at the start of the input string: input_start.
+ * - w27 : Where to start in the input string.
+ * - x28 : Output array pointer.
+ * - x29/fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - x16/x17 : IP registers, used by assembler. Very volatile.
+ * - csp : Points to tip of C stack.
+ *
+ * - x0-x7 : Used as a cache to store 32 bit capture registers. These
+ * registers need to be preserved across every call to
+ * C code.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * Location Name Description
+ * (as referred to in
+ * the code)
+ *
+ * - fp[104] isolate Address of the current isolate.
+ * - fp[96] return_address Secondary link/return address
+ * used by an exit frame if this is a
+ * native call.
+ * ^^^ csp when called ^^^
+ * - fp[88] lr Return from the RegExp code.
+ * - fp[80] r29 Old frame pointer (CalleeSaved).
+ * - fp[0..72] r19-r28 Backup of CalleeSaved registers.
+ * - fp[-8] direct_call 1 => Direct call from JavaScript code.
+ * 0 => Call through the runtime system.
+ * - fp[-16] stack_base High end of the memory area to use as
+ * the backtracking stack.
+ * - fp[-24] output_size Output may fit multiple sets of matches.
+ * - fp[-32] input Handle containing the input string.
+ * - fp[-40] success_counter
+ * ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
+ * - fp[-44] register N Capture registers initialized with
+ * - fp[-48] register N + 1 non_position_value.
+ * ... The first kNumCachedRegisters (N) registers
+ * ... are cached in x0 to x7.
+ * ... Only positions must be stored in the first
+ * - ... num_saved_registers_ registers.
+ * - ...
+ * - register N + num_registers - 1
+ * ^^^^^^^^^ csp ^^^^^^^^^
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input,
+ * int start_offset,
+ * Address input_start,
+ * Address input_end,
+ * int* output,
+ * int output_size,
+ * Address stack_base,
+ * bool direct_call = false,
+ * Address secondary_return_address, // Only used by native call.
+ * Isolate* isolate)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in arm64/simulator-arm64.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(
+ Mode mode,
+ int registers_to_save,
+ Zone* zone)
+ : NativeRegExpMacroAssembler(zone),
+ masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ __ SetStackPointer(csp);
+ ASSERT_EQ(0, registers_to_save % 2);
+ // We can cache at most 16 W registers in x0-x7.
+ STATIC_ASSERT(kNumCachedRegisters <= 16);
+ STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
+ __ B(&entry_label_); // We'll write the entry code later.
+ __ Bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+int RegExpMacroAssemblerARM64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add(current_input_offset(),
+ current_input_offset(), by * char_size());
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
+ ASSERT((reg >= 0) && (reg < num_registers_));
+ if (by != 0) {
+ Register to_advance;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(w10, register_location(reg));
+ __ Add(w10, w10, by);
+ __ Str(w10, register_location(reg));
+ break;
+ case CACHED_LSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance, by);
+ break;
+ case CACHED_MSW:
+ to_advance = GetCachedRegister(reg);
+ __ Add(to_advance, to_advance,
+ static_cast<int64_t>(by) << kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::Backtrack() {
+ CheckPreemption();
+ Pop(w10);
+ __ Add(x10, code_pointer(), Operand(w10, UXTW));
+ __ Br(x10);
+}
+
+
+void RegExpMacroAssemblerARM64::Bind(Label* label) {
+ __ Bind(label);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacter(uint32_t c, Label* on_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
+ Label* on_greater) {
+ CompareAndBranchOrBacktrack(current_character(), limit, hi, on_greater);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(eq, on_at_start);
+ __ Bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the input string?
+ CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
+ // If we did, are we still at the start of the input string?
+ __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, input_start());
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterLT(uc16 limit, Label* on_less) {
+ CompareAndBranchOrBacktrack(current_character(), limit, lo, on_less);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ // This method is only ever called from the cctests.
+
+ if (check_end_of_string) {
+ // Is the last character of the required match inside the string?
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ Register characters_address = x11;
+
+ __ Add(characters_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+ if (cp_offset != 0) {
+ __ Add(characters_address, characters_address, cp_offset * char_size());
+ }
+
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
+ } else {
+ __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
+ }
+ CompareAndBranchOrBacktrack(w10, str[i], ne, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
+ __ Ldr(w10, MemOperand(backtrack_stackpointer()));
+ __ Cmp(current_input_offset(), w10);
+ __ Cset(x11, eq);
+ __ Add(backtrack_stackpointer(),
+ backtrack_stackpointer(), Operand(x11, LSL, kWRegSizeLog2));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_offset = w10;
+ // Save the capture length in a callee-saved register so it will
+ // be preserved if we call a C helper.
+ Register capture_length = w19;
+ ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, capture_start_offset); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+
+ __ Add(capture_start_address,
+ input_end(),
+ Operand(capture_start_offset, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ __ Cmp(w10, w11);
+ __ B(eq, &loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Orr(w10, w10, 0x20); // Convert capture character to lower-case.
+ __ Orr(w11, w11, 0x20); // Also convert input character.
+ __ Cmp(w11, w10);
+ __ B(ne, &fail);
+ __ Sub(w10, w10, 'a');
+ __ Cmp(w10, 'z' - 'a'); // Is w10 a lowercase letter?
+ __ B(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub(w10, w10, 224 - 'a');
+ __ Cmp(w10, 254 - 224);
+ __ Ccmp(w10, 247 - 224, ZFlag, ls); // Check for 247.
+ __ B(eq, &fail); // Weren't Latin-1 letters.
+
+ __ Bind(&loop_check);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+ __ B(&success);
+
+ __ Bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ Bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ int argument_count = 4;
+
+ // The cached registers need to be retained.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+ __ PushCPURegList(cached_registers);
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // x0: Address byte_offset1 - Address captured substring's start.
+ // x1: Address byte_offset2 - Address of current character position.
+ // w2: size_t byte_length - length of capture in bytes(!)
+ // x3: Isolate* isolate
+
+ // Address of start of capture.
+ __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
+ // Length of capture.
+ __ Mov(w2, capture_length);
+ // Address of current input position.
+ __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ // Isolate.
+ __ Mov(x3, ExternalReference::isolate_address(isolate()));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Check if function returned non-zero for success or zero for failure.
+ CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ }
+
+ __ Bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerARM64::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ Register capture_start_address = x12;
+ Register capture_end_address = x13;
+ Register current_position_address = x14;
+ Register capture_length = w15;
+
+ // Find length of back-referenced capture.
+ ASSERT((start_reg % 2) == 0);
+ if (start_reg < kNumCachedRegisters) {
+ __ Mov(x10, GetCachedRegister(start_reg));
+ __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
+ } else {
+ __ Ldp(w11, w10, capture_location(start_reg, x10));
+ }
+ __ Sub(capture_length, w11, w10); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Cbz(capture_length, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ Add(capture_start_address, input_end(), Operand(w10, SXTW));
+ __ Add(capture_end_address,
+ capture_start_address,
+ Operand(capture_length, SXTW));
+ __ Add(current_position_address,
+ input_end(),
+ Operand(current_input_offset(), SXTW));
+
+ Label loop;
+ __ Bind(&loop);
+ if (mode_ == ASCII) {
+ __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
+ __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
+ __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
+ }
+ __ Cmp(w10, w11);
+ BranchOrBacktrack(ne, on_no_match);
+ __ Cmp(capture_start_address, capture_end_address);
+ __ B(lt, &loop);
+
+ // Move current character position to position after match.
+ __ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
+ __ Ccmp(current_input_offset(), 0, NoFlag, eq);
+ // The current input offset should be <= 0, and fit in a W register.
+ __ Check(le, kOffsetOutOfRange);
+ }
+ __ Bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacter(unsigned c,
+ Label* on_not_equal) {
+ CompareAndBranchOrBacktrack(current_character(), c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal) {
+ __ And(w10, current_character(), mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
+ __ Sub(w10, current_character(), minus);
+ __ And(w10, w10, mask);
+ CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned lower-or-same condition.
+ CompareAndBranchOrBacktrack(w10, to - from, ls, on_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckCharacterNotInRange(
+ uc16 from,
+ uc16 to,
+ Label* on_not_in_range) {
+ __ Sub(w10, current_character(), from);
+ // Unsigned higher condition.
+ CompareAndBranchOrBacktrack(w10, to - from, hi, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckBitInTable(
+ Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ Mov(x11, Operand(table));
+ if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+ __ And(w10, current_character(), kTableMask);
+ __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Add(w10, current_character(), ByteArray::kHeaderSize - kHeapObjectTag);
+ }
+ __ Ldrb(w11, MemOperand(x11, w10, UXTW));
+ CompareAndBranchOrBacktrack(w11, 0, ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check
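+ // For example, the ASCII digit test in case 'd' below becomes:
+ //   Sub(w10, current_character(), '0');
+ //   CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);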
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ // Check for ' ' or 0x00a0.
+ __ Cmp(current_character(), ' ');
+ __ Ccmp(current_character(), 0x00a0, ZFlag, ne);
+ __ B(eq, &success);
+ // Check range 0x09..0x0d.
+ __ Sub(w10, current_character(), '\t');
+ CompareAndBranchOrBacktrack(w10, '\r' - '\t', hi, on_no_match);
+ __ Bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', hi, on_no_match);
+ return true;
+ case 'D':
+ // Match ASCII non-digits.
+ __ Sub(w10, current_character(), '0');
+ CompareAndBranchOrBacktrack(w10, '9' - '0', ls, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // Here we emit the conditional branch only once at the end to make branch
+ // prediction more efficient, even though we could branch out of here
+ // as soon as a character matches.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a branch.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // ls -> !((C==1) && (Z==0))
+ BranchOrBacktrack(ls, on_no_match);
+ } else {
+ BranchOrBacktrack(eq, on_no_match);
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ // We have to check all 4 newline characters before emitting
+ // the conditional branch.
+ __ Cmp(current_character(), 0x0a);
+ __ Ccmp(current_character(), 0x0d, ZFlag, ne);
+ if (mode_ == UC16) {
+ __ Sub(w10, current_character(), 0x2028);
+ // If the Z flag was set we clear the flags to force a fall-through.
+ __ Ccmp(w10, 0x2029 - 0x2028, NoFlag, ne);
+ // hi -> (C==1) && (Z==0)
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ BranchOrBacktrack(ne, on_no_match);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Cmp(current_character(), 'z');
+ __ B(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ Mov(x10, map);
+ __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
+ CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
+ __ Bind(&done);
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::Fail() {
+ __ Mov(w0, FAILURE);
+ __ B(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
+ Label return_w0;
+ // Finalize code - write the entry point code now that we know how many
+ // registers we need.
+
+ // Entry code:
+ __ Bind(&entry_label_);
+
+ // Arguments on entry:
+ // x0: String* input
+ // x1: int start_offset
+ // x2: byte* input_start
+ // x3: byte* input_end
+ // x4: int* output array
+ // x5: int output array size
+ // x6: Address stack_base
+ // x7: int direct_call
+
+ // The stack pointer should be csp on entry.
+ // csp[8]: address of the current isolate
+ // csp[0]: secondary link/return address used by native call
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Push registers on the stack; only push the argument registers that we need.
+ CPURegList argument_registers(x0, x5, x6, x7);
+
+ CPURegList registers_to_retain = kCalleeSaved;
+ ASSERT(kCalleeSaved.Count() == 11);
+ registers_to_retain.Combine(lr);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ PushCPURegList(registers_to_retain);
+ __ PushCPURegList(argument_registers);
+
+ // Set frame pointer in place.
+ __ Add(frame_pointer(), csp, argument_registers.Count() * kPointerSize);
+
+ // Initialize callee-saved registers.
+ __ Mov(start_offset(), w1);
+ __ Mov(input_start(), x2);
+ __ Mov(input_end(), x3);
+ __ Mov(output_array(), x4);
+
+ // Set the number of registers we will need to allocate, that is:
+ // - success_counter (X register)
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
+ // Do not allocate registers on the stack if they can all be cached.
+ if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
+ // Make room for the success_counter.
+ num_wreg_to_allocate += 2;
+
+ // Make sure the stack alignment will be respected.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kWRegSize) - 1;
+ num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+
+ // Check if we have space on the stack.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, csp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
+
+ __ Bind(&stack_ok);
+
+ // Allocate space on stack.
+ __ Claim(num_wreg_to_allocate, kWRegSize);
+
+ // Initialize success_counter with 0.
+ __ Str(wzr, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Find negative length (offset of start relative to end).
+ __ Sub(x10, input_start(), input_end());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Neg(x11, x10);
+ __ Cmp(x11, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ __ Mov(current_input_offset(), w10);
+
+ // The non-position value is used as a clearing value for the
+ // capture registers; it corresponds to the position of the first character
+ // minus one.
+ __ Sub(non_position_value(), current_input_offset(), char_size());
+ __ Sub(non_position_value(), non_position_value(),
+ Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
+ // We can store this value twice in an X register for initializing
+ // on-stack registers later.
+ __ Orr(twice_non_position_value(),
+ non_position_value().X(),
+ Operand(non_position_value().X(), LSL, kWRegSizeInBits));
+
+ // Initialize code pointer register.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) {
+ ClearRegisters(0, num_saved_registers_ - 1);
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
+
+ // Execute
+ __ B(&start_label_);
+
+ if (backtrack_label_.is_linked()) {
+ __ Bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ if (success_label_.is_linked()) {
+ Register first_capture_start = w15;
+
+ // Save captures when successful.
+ __ Bind(&success_label_);
+
+ if (num_saved_registers_ > 0) {
+ // V8 expects the output to be an int32_t array.
+ Register capture_start = w12;
+ Register capture_end = w13;
+ Register input_length = w14;
+
+ // Copy captures to output.
+
+ // Get string length.
+ __ Sub(x10, input_end(), input_start());
+ if (masm_->emit_debug_code()) {
+ // Check that the input string length is < 2^30.
+ __ Cmp(x10, (1<<30) - 1);
+ __ Check(ls, kInputStringTooLong);
+ }
+ // input_start is offset by start_offset on entry. We need to include
+ // it when computing the length of the whole string.
+ if (mode_ == UC16) {
+ __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
+ } else {
+ __ Add(input_length, start_offset(), w10);
+ }
+
+ // Copy the results to the output array from the cached registers first.
+ for (int i = 0;
+ (i < num_saved_registers_) && (i < kNumCachedRegisters);
+ i += 2) {
+ __ Mov(capture_start.X(), GetCachedRegister(i));
+ __ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+
+ // Only carry on if there are more than kNumCachedRegisters capture
+ // registers.
+ int num_registers_left_on_stack =
+ num_saved_registers_ - kNumCachedRegisters;
+ if (num_registers_left_on_stack > 0) {
+ Register base = x10;
+ // There is always an even number of capture registers. A pair of
+ // registers determines one match with two offsets.
+ ASSERT_EQ(0, num_registers_left_on_stack % 2);
+ __ Add(base, frame_pointer(), kFirstCaptureOnStack);
+
+ // We can unroll the loop here; we should not unroll for fewer than 2
+ // registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ if (num_registers_left_on_stack <= kNumRegistersToUnroll) {
+ for (int i = 0; i < num_registers_left_on_stack / 2; i++) {
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if ((i == 0) && global_with_zero_length_check()) {
+ // Keep capture start for the zero-length check later.
+ __ Mov(first_capture_start, capture_start);
+ }
+ // Offsets need to be relative to the start of the string.
+ if (mode_ == UC16) {
+ __ Add(capture_start,
+ input_length,
+ Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ }
+ } else {
+ Label loop, start;
+ __ Mov(x11, num_registers_left_on_stack);
+
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ if (global_with_zero_length_check()) {
+ __ Mov(first_capture_start, capture_start);
+ }
+ __ B(&start);
+
+ __ Bind(&loop);
+ __ Ldp(capture_end,
+ capture_start,
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Bind(&start);
+ if (mode_ == UC16) {
+ __ Add(capture_start, input_length, Operand(capture_start, ASR, 1));
+ __ Add(capture_end, input_length, Operand(capture_end, ASR, 1));
+ } else {
+ __ Add(capture_start, input_length, capture_start);
+ __ Add(capture_end, input_length, capture_end);
+ }
+ // The output pointer advances for a possible global match.
+ __ Stp(capture_start,
+ capture_end,
+ MemOperand(output_array(), kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ }
+ }
+ }
+
+ if (global()) {
+ Register success_counter = w0;
+ Register output_size = x10;
+ // Restart matching if the regular expression is flagged as global.
+
+ // Increment success counter.
+ __ Ldr(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+ __ Add(success_counter, success_counter, 1);
+ __ Str(success_counter, MemOperand(frame_pointer(), kSuccessCounter));
+
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Ldr(output_size, MemOperand(frame_pointer(), kOutputSize));
+ __ Sub(output_size, output_size, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ __ Cmp(output_size, num_saved_registers_);
+ __ B(lt, &return_w0);
+
+ // The output pointer is already set to the next field in the output
+ // array.
+ // Update output size on the frame before we restart matching.
+ __ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ __ Cmp(current_input_offset(), first_capture_start);
+ // Not a zero-length match, restart.
+ __ B(ne, &load_char_start_regexp);
+ // Offset from the end is zero if we already reached the end.
+ __ Cbz(current_input_offset(), &return_w0);
+ // Advance current position after a zero-length match.
+ __ Add(current_input_offset(),
+ current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ }
+
+ __ B(&load_char_start_regexp);
+ } else {
+ __ Mov(w0, SUCCESS);
+ }
+ }
+
+ if (exit_label_.is_linked()) {
+ // Exit and return w0
+ __ Bind(&exit_label_);
+ if (global()) {
+ __ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
+ }
+ }
+
+ __ Bind(&return_w0);
+
+ // Set stack pointer back to first register to retain
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Mov(csp, fp);
+
+ // Restore registers.
+ __ PopCPURegList(registers_to_retain);
+
+ __ Ret();
+
+ Label exit_with_exception;
+ // Registers x0 to x7 are used to store the first captures; they need to be
+ // retained over calls to C++ code.
+ CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
+ ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+
+ if (check_preempt_label_.is_linked()) {
+ __ Bind(&check_preempt_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ CallCheckStackGuardState(x10);
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbnz(w0, &return_w0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (stack_overflow_label_.is_linked()) {
+ __ Bind(&stack_overflow_label_);
+ SaveLinkRegister();
+ // The cached registers need to be retained.
+ __ PushCPURegList(cached_registers);
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+ __ Add(x1, frame_pointer(), kStackBase);
+ __ Mov(x0, backtrack_stackpointer());
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, 3);
+ // If it returns NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ // Returning from the regexp code restores the stack (csp <- fp)
+ // so we don't need to drop the link register from it before exiting.
+ __ Cbz(w0, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ Mov(backtrack_stackpointer(), x0);
+ // Reset the cached registers.
+ __ PopCPURegList(cached_registers);
+ RestoreLinkRegister();
+ __ Ret();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ __ Bind(&exit_with_exception);
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = isolate()->factory()->NewCode(
+ code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerARM64::GoTo(Label* to) {
+ BranchOrBacktrack(al, to);
+}
+
+void RegExpMacroAssemblerARM64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ Register to_compare = GetRegister(reg, w10);
+ CompareAndBranchOrBacktrack(to_compare, comparand, lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerARM64::IfRegisterEqPos(int reg, Label* if_eq) {
+ Register to_compare = GetRegister(reg, w10);
+ __ Cmp(to_compare, current_input_offset());
+ BranchOrBacktrack(eq, if_eq);
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerARM64::Implementation() {
+ return kARM64Implementation;
+}
+
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ // TODO(pielan): Make sure long strings are caught before this, and not
+ // just asserted in debug mode.
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ // Be sane! (And ensure that an int32_t can be used to index the string)
+ ASSERT(cp_offset < (1<<30));
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerARM64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PopRegister(int register_index) {
+ Pop(w10);
+ StoreRegister(register_index, w10);
+}
+
+
+void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ Mov(w10, target + Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ Adr(x10, label);
+ __ Sub(x10, x10, code_pointer());
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, kWRegMask);
+ // The code offset has to fit in a W register.
+ __ Check(ls, kOffsetOutOfRange);
+ }
+ }
+ Push(w10);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM64::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ Register to_push = GetRegister(register_index, w10);
+ Push(to_push);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(reg);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(current_input_offset(), register_location(reg));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(reg);
+ __ Mov(current_input_offset(), cached_register.W());
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(reg);
+ __ Lsr(current_input_offset().X(), cached_register, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
+ Register read_from = GetRegister(reg, w10);
+ __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
+}
+
+
+void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Cmp(current_input_offset(), -by * char_size());
+ __ B(ge, &after_position);
+ __ Mov(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ Register set_to = wzr;
+ if (to != 0) {
+ set_to = w10;
+ __ Mov(set_to, to);
+ }
+ StoreRegister(register_index, set_to);
+}
+
+
+bool RegExpMacroAssemblerARM64::Succeed() {
+ __ B(&success_label_);
+ return global();
+}
+
+
+void RegExpMacroAssemblerARM64::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ Register position = current_input_offset();
+ if (cp_offset != 0) {
+ position = w10;
+ __ Add(position, current_input_offset(), cp_offset * char_size());
+ }
+ StoreRegister(reg, position);
+}
+
+
+void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ int num_registers = reg_to - reg_from + 1;
+
+ // If the first capture register is cached in a hardware register but is not
+ // the low half of its 64-bit cache register, we need to clear it specifically.
+ if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ // Clear cached registers in pairs as far as possible.
+ while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
+ ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+ __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
+ reg_from += 2;
+ num_registers -= 2;
+ }
+
+ if ((num_registers % 2) == 1) {
+ StoreRegister(reg_from, non_position_value());
+ num_registers--;
+ reg_from++;
+ }
+
+ if (num_registers > 0) {
+ // If there are some remaining registers, they are stored on the stack.
+ ASSERT(reg_from >= kNumCachedRegisters);
+
+ // Move down the indexes of the registers on stack to get the correct offset
+ // in memory.
+ reg_from -= kNumCachedRegisters;
+ reg_to -= kNumCachedRegisters;
+ // We should not unroll the loop for fewer than 2 registers.
+ STATIC_ASSERT(kNumRegistersToUnroll > 2);
+ // We position the base pointer to (reg_from + 1).
+ int base_offset = kFirstRegisterOnStack -
+ kWRegSize - (kWRegSize * reg_from);
+ if (num_registers > kNumRegistersToUnroll) {
+ Register base = x10;
+ __ Add(base, frame_pointer(), base_offset);
+
+ Label loop;
+ __ Mov(x11, num_registers);
+ __ Bind(&loop);
+ __ Str(twice_non_position_value(),
+ MemOperand(base, -kPointerSize, PostIndex));
+ __ Sub(x11, x11, 2);
+ __ Cbnz(x11, &loop);
+ } else {
+ for (int i = reg_from; i <= reg_to; i += 2) {
+ __ Str(twice_non_position_value(),
+ MemOperand(frame_pointer(), base_offset));
+ base_offset -= kWRegSize * 2;
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
+ __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (masm_->emit_debug_code()) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return *reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+
+int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If it is not a real stack overflow, the stack guard was used to interrupt
+ // execution for another purpose.
+
+ // If this is a direct call from JavaScript, retry the RegExp, forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles(isolate);
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInput));
+
+ // Current string.
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = code_handle->address() - re_code->address();
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ Handle<String> subject_tmp = subject;
+ int slice_offset = 0;
+
+ // Extract the underlying string and the slice offset.
+ if (StringShape(*subject_tmp).IsCons()) {
+ subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+ } else if (StringShape(*subject_tmp).IsSliced()) {
+ SlicedString* slice = SlicedString::cast(*subject_tmp);
+ subject_tmp = Handle<String>(slice->parent());
+ slice_offset = slice->offset();
+ }
+
+ // String might have changed.
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject_tmp).IsSequential() ||
+ StringShape(*subject_tmp).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = *input_start;
+
+ // Find the current start address of the same character at the current string
+ // position.
+ const byte* new_address = StringCharacterPosition(*subject_tmp,
+ start_offset + slice_offset);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = *input_end;
+ int byte_length = static_cast<int>(end_address - start_address);
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ *input_start = new_address;
+ *input_end = new_address + byte_length;
+ } else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
+ // Subject string might have been a ConsString that underwent
+ // short-circuiting during GC. That will not change start_address, but it
+ // will change the pointer inside the subject handle.
+ frame_entry<const String*>(re_frame, kInput) = *subject;
+ }
+
+ return 0;
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(),
+ ge,
+ on_outside_input);
+}
+
+
+bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ return !slow_safe();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
+ // Allocate space on the stack to store the return address. The
+  // CheckStackGuardState C++ function will overwrite it if the code
+  // has moved. Allocate extra space for 2 arguments passed by pointers.
+ // AAPCS64 requires the stack to be 16 byte aligned.
+ int alignment = masm_->ActivationFrameAlignment();
+ ASSERT_EQ(alignment % 16, 0);
+ int align_mask = (alignment / kXRegSize) - 1;
+ int xreg_to_claim = (3 + align_mask) & ~align_mask;
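+  // For example, with a 16-byte activation frame alignment and 8-byte X
+  // registers, align_mask is 1 and the 3 slots are rounded up to 4.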
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Claim(xreg_to_claim);
+
+ // CheckStackGuardState needs the end and start addresses of the input string.
+ __ Poke(input_end(), 2 * kPointerSize);
+ __ Add(x5, csp, 2 * kPointerSize);
+ __ Poke(input_start(), kPointerSize);
+ __ Add(x4, csp, kPointerSize);
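+  // x4 and x5 point at the stacked copies of input_start and input_end, so
+  // that CheckStackGuardState can rewrite them if the subject string moves.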
+
+ __ Mov(w3, start_offset());
+ // RegExp code frame pointer.
+ __ Mov(x2, frame_pointer());
+ // Code* of self.
+ __ Mov(x1, Operand(masm_->CodeObject()));
+
+ // We need to pass a pointer to the return address as first argument.
+ // The DirectCEntry stub will place the return address on the stack before
+ // calling so the stack pointer will point to it.
+ __ Mov(x0, csp);
+
+ ExternalReference check_stack_guard_state =
+ ExternalReference::re_check_stack_guard_state(isolate());
+ __ Mov(scratch, check_stack_guard_state);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, scratch);
+
+ // The input string may have been moved in memory, we need to reload it.
+ __ Peek(input_start(), kPointerSize);
+ __ Peek(input_end(), 2 * kPointerSize);
+
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Drop(xreg_to_claim);
+
+ // Reload the Code pointer.
+ __ Mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+void RegExpMacroAssemblerARM64::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ B(to);
+ return;
+ }
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Condition inverted_condition = InvertCondition(condition);
+ Label no_branch;
+ __ B(inverted_condition, &no_branch);
+ __ B(to);
+ __ Bind(&no_branch);
+}
+
+void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to) {
+ if ((immediate == 0) && ((condition == eq) || (condition == ne))) {
+ if (to == NULL) {
+ to = &backtrack_label_;
+ }
+ // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
+ Label no_branch;
+ if (condition == eq) {
+ __ Cbnz(reg, &no_branch);
+ } else {
+ __ Cbz(reg, &no_branch);
+ }
+ __ B(to);
+ __ Bind(&no_branch);
+ } else {
+ __ Cmp(reg, immediate);
+ BranchOrBacktrack(condition, to);
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Cmp(csp, x10);
+ CallIf(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Cmp(backtrack_stackpointer(), x10);
+ CallIf(&stack_overflow_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM64::Push(Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Str(source,
+ MemOperand(backtrack_stackpointer(),
+ -static_cast<int>(kWRegSize),
+ PreIndex));
+}
+
+
+void RegExpMacroAssemblerARM64::Pop(Register target) {
+ ASSERT(target.Is32Bits());
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ Ldr(target,
+ MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
+}
+
+
+Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
+ ASSERT(register_index < kNumCachedRegisters);
+ return Register::Create(register_index / 2, kXRegSizeInBits);
+}
+
+
+Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
+ Register maybe_result) {
+ ASSERT(maybe_result.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ Register result;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Ldr(maybe_result, register_location(register_index));
+ result = maybe_result;
+ break;
+ case CACHED_LSW:
+ result = GetCachedRegister(register_index).W();
+ break;
+ case CACHED_MSW:
+ __ Lsr(maybe_result.X(), GetCachedRegister(register_index),
+ kWRegSizeInBits);
+ result = maybe_result;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ ASSERT(result.Is32Bits());
+ return result;
+}
+
+
+void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
+ Register source) {
+ ASSERT(source.Is32Bits());
+ ASSERT(register_index >= 0);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+
+ Register cached_register;
+ RegisterState register_state = GetRegisterState(register_index);
+ switch (register_state) {
+ case STACKED:
+ __ Str(source, register_location(register_index));
+ break;
+ case CACHED_LSW:
+ cached_register = GetCachedRegister(register_index);
+ if (!source.Is(cached_register.W())) {
+ __ Bfi(cached_register, source.X(), 0, kWRegSizeInBits);
+ }
+ break;
+ case CACHED_MSW:
+ cached_register = GetCachedRegister(register_index);
+ __ Bfi(cached_register, source.X(), kWRegSizeInBits, kWRegSizeInBits);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) {
+ Label skip_call;
+ if (condition != al) __ B(&skip_call, InvertCondition(condition));
+ __ Bl(to);
+ __ Bind(&skip_call);
+}
+
+
+void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
+ __ Pop(lr, xzr);
+ __ Add(lr, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM64::SaveLinkRegister() {
+ ASSERT(csp.Is(__ StackPointer()));
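+  // Store lr as an offset from the code object rather than as an absolute
+  // address, so the saved value stays valid if the GC moves the code.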
+ __ Sub(lr, lr, Operand(masm_->CodeObject()));
+ __ Push(xzr, lr);
+}
+
+
+MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index >= kNumCachedRegisters);
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstRegisterOnStack - register_index * kWRegSize;
+ return MemOperand(frame_pointer(), offset);
+}
+
+MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
+ Register scratch) {
+ ASSERT(register_index < (1<<30));
+ ASSERT(register_index < num_saved_registers_);
+ ASSERT(register_index >= kNumCachedRegisters);
+ ASSERT_EQ(register_index % 2, 0);
+ register_index -= kNumCachedRegisters;
+ int offset = kFirstCaptureOnStack - register_index * kWRegSize;
+ // capture_location is used with Stp instructions to load/store 2 registers.
+ // The immediate field in the encoding is limited to 7 bits (signed).
+ if (is_int7(offset)) {
+ return MemOperand(frame_pointer(), offset);
+ } else {
+ __ Add(scratch, frame_pointer(), offset);
+ return MemOperand(scratch);
+ }
+}
+
+void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+
+ // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
+ // and the operating system running on the target allow it.
+ // If unaligned load/stores are not supported then this function must only
+ // be used to load a single character at a time.
+
+ // ARMv8 supports unaligned accesses but V8 or the kernel can decide to
+ // disable it.
+ // TODO(pielan): See whether or not we should disable unaligned accesses.
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
+
+ if (cp_offset != 0) {
+ if (masm_->emit_debug_code()) {
+ __ Mov(x10, cp_offset * char_size());
+ __ Add(x10, x10, Operand(current_input_offset(), SXTW));
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The offset needs to fit in a W register.
+ __ Check(eq, kOffsetOutOfRange);
+ } else {
+ __ Add(w10, current_input_offset(), cp_offset * char_size());
+ }
+ offset = w10;
+ }
+
+ if (mode_ == ASCII) {
+ if (characters == 4) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else if (characters == 2) {
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ } else {
+ ASSERT(mode_ == UC16);
+ if (characters == 2) {
+ __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
+ } else {
+ ASSERT(characters == 1);
+ __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
+ }
+ }
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
new file mode 100644
index 000000000..534fd5b01
--- /dev/null
+++ b/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
@@ -0,0 +1,315 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerARM64(Mode mode, int registers_to_save, Zone* zone);
+ virtual ~RegExpMacroAssemblerARM64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+  // A "greedy loop" is a loop that is both greedy and has a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(uc16 from,
+ uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from,
+ uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame,
+ int start_offset,
+ const byte** input_start,
+ const byte** input_end);
+
+ private:
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Callee-saved registers x19-x29, where x29 is the old frame pointer.
+ static const int kCalleeSavedRegisters = 0;
+ // Return address.
+ // It is placed above the 11 callee-saved registers.
+ static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack parameter placed by caller.
+ static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
+ static const int kStackBase = kDirectCall - kPointerSize;
+ static const int kOutputSize = kStackBase - kPointerSize;
+ static const int kInput = kOutputSize - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessCounter = kInput - kPointerSize;
+ // First position register address on the stack. Following positions are
+ // below it. A position is a 32 bit value.
+ static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSize;
+  // A capture is a 64 bit value holding two positions.
+ static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // When initializing registers to a non-position value we can unroll
+ // the loop. Set the limit of registers to unroll.
+ static const int kNumRegistersToUnroll = 16;
+
+ // We are using x0 to x7 as a register cache. Each hardware register must
+ // contain one capture, that is two 32 bit registers. We can cache at most
+ // 16 registers.
+ static const int kNumCachedRegisters = 16;
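+  // For example, capture registers 0 and 1 live in the low and high words of
+  // x0, registers 14 and 15 in x7; register 16 onwards is kept on the stack.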
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // Location of a 32 bit position register.
+ MemOperand register_location(int register_index);
+
+ // Location of a 64 bit capture, combining two position registers.
+ MemOperand capture_location(int register_index, Register scratch);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ Register current_input_offset() { return w21; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ Register current_character() { return w22; }
+
+ // Register holding address of the end of the input string.
+ Register input_end() { return x25; }
+
+ // Register holding address of the start of the input string.
+ Register input_start() { return x26; }
+
+ // Register holding the offset from the start of the string where we should
+ // start matching.
+ Register start_offset() { return w27; }
+
+ // Pointer to the output array's first element.
+ Register output_array() { return x28; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ Register backtrack_stackpointer() { return x23; }
+
+ // Register holding pointer to the current code object.
+ Register code_pointer() { return x20; }
+
+ // Register holding the value used for clearing capture registers.
+ Register non_position_value() { return w24; }
+  // The top 32 bits of this register are used to store this value
+ // twice. This is used for clearing more than one register at a time.
+ Register twice_non_position_value() { return x24; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+  // Compares reg against immediate before calling BranchOrBacktrack.
+ // It makes use of the Cbz and Cbnz instructions.
+ void CompareAndBranchOrBacktrack(Register reg,
+ int immediate,
+ Condition condition,
+ Label* to);
+
+ inline void CallIf(Label* to, Condition condition);
+
+ // Save and restore the link register on the stack in a way that
+ // is GC-safe.
+ inline void SaveLinkRegister();
+ inline void RestoreLinkRegister();
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // This state indicates where the register actually is.
+ enum RegisterState {
+ STACKED, // Resides in memory.
+ CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
+ CACHED_MSW // Most Significant Word of a 64 bit hardware register.
+ };
+
+ RegisterState GetRegisterState(int register_index) {
+ ASSERT(register_index >= 0);
+ if (register_index >= kNumCachedRegisters) {
+ return STACKED;
+ } else {
+ if ((register_index % 2) == 0) {
+ return CACHED_LSW;
+ } else {
+ return CACHED_MSW;
+ }
+ }
+ }
+
+ // Store helper that takes the state of the register into account.
+ inline void StoreRegister(int register_index, Register source);
+
+ // Returns a hardware W register that holds the value of the capture
+ // register.
+ //
+ // This function will try to use an existing cache register (w0-w7) for the
+ // result. Otherwise, it will load the value into maybe_result.
+ //
+ // If the returned register is anything other than maybe_result, calling code
+ // must not write to it.
+ inline Register GetRegister(int register_index, Register maybe_result);
+
+  // Returns the hardware register (x0-x7) holding the value of the capture
+ // register.
+ // This assumes that the state of the register is not STACKED.
+ inline Register GetCachedRegister(int register_index);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
new file mode 100644
index 000000000..cd475b40e
--- /dev/null
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -0,0 +1,3645 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <cmath>
+#include <cstdarg>
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "disasm.h"
+#include "assembler.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/simulator-arm64.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIMULATOR)
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+
+// Helpers for colors.
+// Depending on your terminal configuration, the colour names may not match the
+// observed colours.
+#define COLOUR(colour_code) "\033[" colour_code "m"
+#define BOLD(colour_code) "1;" colour_code
+#define NORMAL ""
+#define GREY "30"
+#define GREEN "32"
+#define ORANGE "33"
+#define BLUE "34"
+#define PURPLE "35"
+#define INDIGO "36"
+#define WHITE "37"
+typedef char const * const TEXT_COLOUR;
+TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
+TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
+TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
+TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
+
+
+// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
+void PRINTF_CHECKING TraceSim(const char* format, ...) {
+ if (FLAG_trace_sim) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ExternalReference::set_redirector(isolate, &RedirectExternalReference);
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == NULL) {
+ if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
+ sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
+ } else {
+ sim = new Decoder<Simulator>();
+ sim->isolate_ = isolate;
+ }
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+
+void Simulator::CallVoid(byte* entry, CallArgument* args) {
+ int index_x = 0;
+ int index_d = 0;
+
+ std::vector<int64_t> stack_args(0);
+ for (int i = 0; !args[i].IsEnd(); i++) {
+ CallArgument arg = args[i];
+ if (arg.IsX() && (index_x < 8)) {
+ set_xreg(index_x++, arg.bits());
+ } else if (arg.IsD() && (index_d < 8)) {
+ set_dreg_bits(index_d++, arg.bits());
+ } else {
+ ASSERT(arg.IsD() || arg.IsX());
+ stack_args.push_back(arg.bits());
+ }
+ }
+
+ // Process stack arguments, and make sure the stack is suitably aligned.
+ uintptr_t original_stack = sp();
+ uintptr_t entry_stack = original_stack -
+ stack_args.size() * sizeof(stack_args[0]);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ char * stack = reinterpret_cast<char*>(entry_stack);
+ std::vector<int64_t>::const_iterator it;
+ for (it = stack_args.begin(); it != stack_args.end(); it++) {
+ memcpy(stack, &(*it), sizeof(*it));
+ stack += sizeof(*it);
+ }
+
+ ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+ set_sp(entry_stack);
+
+ // Call the generated code.
+ set_pc(entry);
+ set_lr(kEndOfSimAddress);
+ CheckPCSComplianceAndRun();
+
+ set_sp(original_stack);
+}
+
+
+int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return xreg(0);
+}
+
+
+double Simulator::CallDouble(byte* entry, CallArgument* args) {
+ CallVoid(entry, args);
+ return dreg(0);
+}
+
+
+int64_t Simulator::CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+ Object* revc,
+ int64_t argc,
+ Object*** argv) {
+ CallArgument args[] = {
+ CallArgument(function_entry),
+ CallArgument(func),
+ CallArgument(revc),
+ CallArgument(argc),
+ CallArgument(argv),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+int64_t Simulator::CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate) {
+ CallArgument args[] = {
+ CallArgument(input),
+ CallArgument(start_offset),
+ CallArgument(input_start),
+ CallArgument(input_end),
+ CallArgument(output),
+ CallArgument(output_size),
+ CallArgument(stack_base),
+ CallArgument(direct_call),
+ CallArgument(return_address),
+ CallArgument(isolate),
+ CallArgument::End()
+ };
+ return CallInt64(entry, args);
+}
+
+
+void Simulator::CheckPCSComplianceAndRun() {
+#ifdef DEBUG
+ CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
+ CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
+
+ int64_t saved_registers[kNumberOfCalleeSavedRegisters];
+ uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters];
+
+ CPURegList register_list = kCalleeSaved;
+ CPURegList fpregister_list = kCalleeSavedFP;
+
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+    // x31 is not a callee-saved register, so there is no need to specify
+    // whether we want the stack pointer or the zero register.
+ saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ saved_fpregisters[i] =
+ dreg_bits(fpregister_list.PopLowestIndex().code());
+ }
+ int64_t original_stack = sp();
+#endif
+ // Start the simulation!
+ Run();
+#ifdef DEBUG
+ CHECK_EQ(original_stack, sp());
+ // Check that callee-saved registers have been preserved.
+ register_list = kCalleeSaved;
+ fpregister_list = kCalleeSavedFP;
+ for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
+ CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ }
+ for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
+ ASSERT(saved_fpregisters[i] ==
+ dreg_bits(fpregister_list.PopLowestIndex().code()));
+ }
+
+  // Corrupt the caller-saved registers minus the return registers.
+
+  // In theory x0 to x7 can be used for return values, but V8 only uses x0, x1
+  // for now.
+ register_list = kCallerSaved;
+ register_list.Remove(x0);
+ register_list.Remove(x1);
+
+ // In theory d0 to d7 can be used for return values, but V8 only uses d0
+  // for now.
+ fpregister_list = kCallerSavedFP;
+ fpregister_list.Remove(d0);
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+#endif
+}
+
+
+#ifdef DEBUG
+// The least significant byte of the corruption value holds the corresponding
+// register's code.
+void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
+ if (list->type() == CPURegister::kRegister) {
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_xreg(code, value | code);
+ }
+ } else {
+ ASSERT(list->type() == CPURegister::kFPRegister);
+ while (!list->IsEmpty()) {
+ unsigned code = list->PopLowestIndex().code();
+ set_dreg_bits(code, value | code);
+ }
+ }
+}
+
+
+void Simulator::CorruptAllCallerSavedCPURegisters() {
+  // CorruptRegisters alters its parameters, so copy them first.
+ CPURegList register_list = kCallerSaved;
+ CPURegList fpregister_list = kCallerSavedFP;
+
+ CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
+ CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue);
+}
+#endif
+
+
+// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ intptr_t new_sp = sp() - 2 * kXRegSize;
+ uintptr_t* alignment_slot =
+ reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
+ memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ memcpy(stack_slot, &address, kPointerSize);
+ set_sp(new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ intptr_t current_sp = sp();
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+ set_sp(current_sp + 2 * kXRegSize);
+ return address;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+}
+
+
+Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate, FILE* stream)
+ : decoder_(decoder),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(isolate) {
+ // Setup the decoder.
+ decoder_->AppendVisitor(this);
+
+ Init(stream);
+
+ if (FLAG_trace_sim) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ log_parameters_ = LOG_ALL;
+ }
+
+ if (FLAG_log_instruction_stats) {
+ instrument_ = new Instrument(FLAG_log_instruction_file,
+ FLAG_log_instruction_period);
+ decoder_->AppendVisitor(instrument_);
+ }
+}
+
+
+Simulator::Simulator()
+ : decoder_(NULL),
+ last_debugger_input_(NULL),
+ log_parameters_(NO_PARAM),
+ isolate_(NULL) {
+ Init(NULL);
+ CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
+}
+
+
+void Simulator::Init(FILE* stream) {
+ ResetState();
+
+ // Allocate and setup the simulator stack.
+ stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
+ stack_ = new byte[stack_size_];
+ stack_limit_ = stack_ + stack_protection_size_;
+ byte* tos = stack_ + stack_size_ - stack_protection_size_;
+ // The stack pointer must be 16 bytes aligned.
+ set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+
+ stream_ = stream;
+ print_disasm_ = new PrintDisassembler(stream_);
+
+ // The debugger needs to disassemble code without the simulator executing an
+ // instruction, so we create a dedicated decoder.
+ disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
+ disassembler_decoder_->AppendVisitor(print_disasm_);
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = NULL;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
+ set_dreg_bits(i, 0x7ff000007f800001UL);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+
+ // Reset debug helpers.
+  breakpoints_.clear();
+ break_on_next_= false;
+}
+
+
+Simulator::~Simulator() {
+ delete[] stack_;
+ if (FLAG_log_instruction_stats) {
+ delete instrument_;
+ }
+ delete disassembler_decoder_;
+ delete print_disasm_;
+ DeleteArray(last_debugger_input_);
+ delete decoder_;
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ }
+}
+
+
+void Simulator::RunFrom(Instruction* start) {
+ set_pc(start);
+ Run();
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a HLT (halt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the HLT instruction so the simulator knows what to call.
+class Redirection {
+ public:
+ Redirection(void* external_function, ExternalReference::Type type)
+ : external_function_(external_function),
+ type_(type),
+ next_(NULL) {
+ redirect_call_.SetInstructionBits(
+ HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ // TODO(all): Simulator flush I cache
+ isolate->set_simulator_redirection(this);
+ }
+
+ void* address_of_redirect_call() {
+ return reinterpret_cast<void*>(&redirect_call_);
+ }
+
+ template <typename T>
+ T external_function() { return reinterpret_cast<T>(external_function_); }
+
+ ExternalReference::Type type() { return type_; }
+
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
+ if (current->external_function_ == external_function) {
+ ASSERT_EQ(current->type(), type);
+ return current;
+ }
+ }
+ return new Redirection(external_function, type);
+ }
+
+ static Redirection* FromHltInstruction(Instruction* redirect_call) {
+ char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
+ char* addr_of_redirection =
+ addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
+
+ static void* ReverseRedirection(int64_t reg) {
+ Redirection* redirection =
+ FromHltInstruction(reinterpret_cast<Instruction*>(reg));
+ return redirection->external_function<void*>();
+ }
+
+ private:
+ void* external_function_;
+ Instruction redirect_call_;
+ ExternalReference::Type type_;
+ Redirection* next_;
+};
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair structure.
+// The simulator assumes all runtime calls return two 64-bit values. If they
+// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
+struct ObjectPair {
+ int64_t res0;
+ int64_t res1;
+};
+
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3,
+ int64_t arg4,
+ int64_t arg5,
+ int64_t arg6,
+ int64_t arg7);
+
+typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
+typedef double (*SimulatorRuntimeFPCall)(double arg1);
+typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+
+// This signature supports a direct call into an API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+void Simulator::DoRuntimeCall(Instruction* instr) {
+ Redirection* redirection = Redirection::FromHltInstruction(instr);
+
+ // The called C code might itself call simulated code, so any
+ // caller-saved registers (including lr) could still be clobbered by a
+ // redirected call.
+ Instruction* return_address = lr();
+
+ int64_t external = redirection->external_function<int64_t>();
+
+ TraceSim("Call to host function at %p\n",
+ redirection->external_function<void*>());
+
+ // SP must be 16-byte-aligned at the call interface.
+ bool stack_alignment_exception = ((sp() & 0xf) != 0);
+ if (stack_alignment_exception) {
+ TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ switch (redirection->type()) {
+ default:
+ TraceSim("Type: Unknown.\n");
+ UNREACHABLE();
+ break;
+
+ case ExternalReference::BUILTIN_CALL: {
+ // MaybeObject* f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL\n");
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim("Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
+ xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
+ result.res0, result.res1);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result.res0);
+ set_xreg(1, result.res1);
+ break;
+ }
+
+ case ExternalReference::DIRECT_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&)
+ TraceSim("Type: DIRECT_API_CALL\n");
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
+ target(xreg(0));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ // int f(double, double)
+ TraceSim("Type: BUILTIN_COMPARE_CALL\n");
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ int64_t result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %" PRId64 "\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_CALL: {
+ // double f(double)
+ TraceSim("Type: BUILTIN_FP_CALL\n");
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ TraceSim("Argument: %f\n", dreg(0));
+ double result = target(dreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ // double f(double, double)
+ TraceSim("Type: BUILTIN_FP_FP_CALL\n");
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
+ double result = target(dreg(0), dreg(1));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ // double f(double, int)
+ TraceSim("Type: BUILTIN_FP_INT_CALL\n");
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
+ double result = target(dreg(0), wreg(0));
+ TraceSim("Returned: %f\n", result);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_dreg(0, result);
+ break;
+ }
+
+ case ExternalReference::DIRECT_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info)
+ TraceSim("Type: DIRECT_GETTER_CALL\n");
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
+ xreg(0), xreg(1));
+ target(xreg(0), xreg(1));
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_API_CALL: {
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ TraceSim("Type: PROFILING_API_CALL\n");
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ void* arg1 = Redirection::ReverseRedirection(xreg(1));
+ TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
+ target(xreg(0), arg1);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+
+ case ExternalReference::PROFILING_GETTER_CALL: {
+ // void f(Local<String> property, PropertyCallbackInfo& info,
+ // AccessorGetterCallback callback)
+ TraceSim("Type: PROFILING_GETTER_CALL\n");
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ void* arg2 = Redirection::ReverseRedirection(xreg(2));
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
+ xreg(0), xreg(1), arg2);
+ target(xreg(0), xreg(1), arg2);
+ TraceSim("No return value.");
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ break;
+ }
+ }
+
+ set_lr(return_address);
+ set_pc(return_address);
+}
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
+ return redirection->address_of_redirect_call();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "cp", "jssp", "fp", "lr", "xzr", "csp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ ASSERT(code < kNumberOfFPRegisters);
+ return vreg_names[code];
+}
+
+
+int Simulator::CodeFromName(const char* name) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if ((strcmp(xreg_names[i], name) == 0) ||
+ (strcmp(wreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if ((strcmp(vreg_names[i], name) == 0) ||
+ (strcmp(dreg_names[i], name) == 0) ||
+ (strcmp(sreg_names[i], name) == 0)) {
+ return i;
+ }
+ }
+ if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
+ return kSPRegInternalCode;
+ }
+ return -1;
+}
+
+
+// Helpers ---------------------------------------------------------------------
+int64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in) {
+ ASSERT((carry_in == 0) || (carry_in == 1));
+ ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+
+ uint64_t u1, u2;
+ int64_t result;
+ int64_t signed_sum = src1 + src2 + carry_in;
+
+ bool N, Z, C, V;
+
+ if (reg_size == kWRegSizeInBits) {
+ u1 = static_cast<uint64_t>(src1) & kWRegMask;
+ u2 = static_cast<uint64_t>(src2) & kWRegMask;
+
+ result = signed_sum & kWRegMask;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kWMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kWMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
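+    // The 32-bit operands are shifted up into the top half of 64-bit values
+    // so that the signed comparisons below test the W-register sign bit.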
+ int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits);
+ int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits);
+ int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits);
+ V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0);
+
+ } else {
+ u1 = static_cast<uint64_t>(src1);
+ u2 = static_cast<uint64_t>(src2);
+
+ result = signed_sum;
+ // Compute the C flag by comparing the sum to the max unsigned integer.
+ C = ((kXMaxUInt - u1) < (u2 + carry_in)) ||
+ ((kXMaxUInt - u1 - carry_in) < u2);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+ }
+
+ N = CalcNFlag(result, reg_size);
+ Z = CalcZFlag(result);
+
+ if (set_flags) {
+ nzcv().SetN(N);
+ nzcv().SetZ(Z);
+ nzcv().SetC(C);
+ nzcv().SetV(V);
+ }
+ return result;
+}
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSizeInBits - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSizeInBits) {
+ value &= kWRegMask;
+ }
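+      // Bits rotated out at the bottom reappear at the top of the register.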
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((1L << amount) - 1L)) << (reg_size - amount));
+ }
+ default:
+ UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
+
+
+template<> double Simulator::FPDefaultNaN<double>() const {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() const {
+ return kFP32DefaultNaN;
+}
+
+
+void Simulator::FPCompare(double val0, double val1) {
+ AssertSupportedFPCR();
+
+ // TODO(jbramley): This assumes that the C++ implementation handles
+ // comparisons in the way that we expect (as per AssertSupportedFPCR()).
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::SetBreakpoint(Instruction* location) {
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if (breakpoints_.at(i).location == location) {
+ PrintF("Existing breakpoint at %p was %s\n",
+ reinterpret_cast<void*>(location),
+ breakpoints_.at(i).enabled ? "disabled" : "enabled");
+ breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
+ return;
+ }
+ }
+ Breakpoint new_breakpoint = {location, true};
+ breakpoints_.push_back(new_breakpoint);
+ PrintF("Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+}
+
+
+void Simulator::ListBreakpoints() {
+ PrintF("Breakpoints:\n");
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ PrintF("%p : %s\n",
+ reinterpret_cast<void*>(breakpoints_.at(i).location),
+ breakpoints_.at(i).enabled ? "enabled" : "disabled");
+ }
+}
+
+
+void Simulator::CheckBreakpoints() {
+ bool hit_a_breakpoint = false;
+ for (unsigned i = 0; i < breakpoints_.size(); i++) {
+ if ((breakpoints_.at(i).location == pc_) &&
+ breakpoints_.at(i).enabled) {
+ hit_a_breakpoint = true;
+ // Disable this breakpoint.
+ breakpoints_.at(i).enabled = false;
+ }
+ }
+ if (hit_a_breakpoint) {
+ PrintF("Hit and disabled a breakpoint at %p.\n",
+ reinterpret_cast<void*>(pc_));
+ Debug();
+ }
+}
+
+
+void Simulator::CheckBreakNext() {
+ // If the current instruction is a BL, insert a breakpoint just after it.
+ if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
+ SetBreakpoint(pc_->following());
+ break_on_next_ = false;
+ }
+}
+
+
+void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
+ Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ for (Instruction* pc = start; pc < end; pc = pc->following()) {
+ disassembler_decoder_->Decode(pc);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters(bool print_all) {
+ static bool first_run = true;
+
+ static SimSystemRegister last_nzcv;
+ if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
+ fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ }
+ last_nzcv = nzcv();
+
+ static SimSystemRegister last_fpcr;
+ if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name,
+ clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ }
+ last_fpcr = fpcr();
+
+ first_run = false;
+}
+
+
+void Simulator::PrintRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static int64_t last_regs[kNumberOfRegisters];
+
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (print_all_regs || first_run ||
+ (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
+ fprintf(stream_,
+ "# %s%4s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name,
+ XRegNameForCode(i, Reg31IsStackPointer),
+ clr_reg_value,
+ xreg(i, Reg31IsStackPointer),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = xreg(i, Reg31IsStackPointer);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintFPRegisters(bool print_all_regs) {
+ static bool first_run = true;
+ static uint64_t last_regs[kNumberOfFPRegisters];
+
+ // Print as many rows of registers as necessary, keeping each individual
+ // register in the same column each time (to make it easy to visually scan
+ // for changes).
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
+ fprintf(stream_,
+ "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name,
+ VRegNameForCode(i),
+ clr_fpreg_value,
+ dreg_bits(i),
+ clr_normal,
+ clr_fpreg_name,
+ DRegNameForCode(i),
+ clr_fpreg_value,
+ dreg(i),
+ clr_fpreg_name,
+ SRegNameForCode(i),
+ clr_fpreg_value,
+ sreg(i),
+ clr_normal);
+ }
+ // Cache the new register value so the next run can detect any changes.
+ last_regs[i] = dreg_bits(i);
+ }
+ first_run = false;
+}
+
+
+void Simulator::PrintProcessorState() {
+ PrintSystemRegisters();
+ PrintRegisters();
+ PrintFPRegisters();
+}
+
+
+void Simulator::PrintWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ // The output format is "# value -> address". The format string is kept in a
+ // separate variable rather than written inline because compilers tend to
+ // struggle with the parametrized width (%0*).
+ const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
+ fprintf(stream_,
+ format,
+ clr_memory_value,
+ num_bytes * 2, // The width in hex digits.
+ value,
+ clr_normal,
+ clr_memory_address,
+ address,
+ clr_normal);
+}
+
+
+// Visitors -------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(Instruction* instr) {
+ fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(Instruction* instr) {
+ fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<void*>(instr), instr->InstructionBits());
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR:
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+ break;
+ case ADRP: // Not implemented in the assembler.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranch(Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->following());
+ // Fall through.
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(Instruction* instr) {
+ ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
+ Instruction* target = reg<Instruction*>(instr->Rn());
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR: {
+ set_lr(instr->following());
+ if (instr->Rn() == 31) {
+ // BLR XZR is used as a guard for the constant pool. We should never hit
+ // this, but if we do, trap to allow debugging.
+ Debug();
+ }
+ // Fall through.
+ }
+ case BR:
+ case RET: set_pc(target); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitTestBranch(Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: break;
+ case TBNZ: take_branch = !take_branch; break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, instr->RdMode());
+}
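+
+// Note: in the SUB and SUBS cases of AddSubHelper() above, subtraction is
+// performed with the two's-complement identity rn - op2 == rn + ~op2 + 1, so
+// the one AddWithCarry() routine serves both additions and subtractions and
+// produces the expected flags. For example, a 32-bit SUBS computing 7 - 5
+// evaluates 7 + 0xfffffffa + 1, which truncates to 2 with the carry flag set
+// (no borrow).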
+
+
+void Simulator::VisitAddSubShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ nzcv().C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; // Fall through.
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ unsigned addr_reg = instr->Rn();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ int num_bytes = 1 << instr->SizeLS();
+ uint8_t* stack = NULL;
+
+ // Handle the writeback for stores before the store. On a CPU the writeback
+ // and the store are atomic, but when running on the simulator it is possible
+ // to be interrupted between them. The simulator is not thread safe and V8
+ // does not require it to be to run JavaScript, so the profiler may sample
+ // the "simulated" CPU in the middle of a load/store with writeback. The code
+ // below ensures that push operations are safe even when interrupted: the
+ // stack pointer will be decremented before the element is added to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For a store, the address after writeback is used to check for accesses
+ // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
+ case LDRSB_w: {
+ set_wreg(srcdst,
+ ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSB_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB));
+ break;
+ }
+ case LDRSH_w: {
+ set_wreg(srcdst,
+ ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSH_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH));
+ break;
+ }
+ case LDRSW_x: {
+ set_xreg(srcdst,
+ ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
+ break;
+ }
+ case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
+ case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
+ case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
+ case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
+ default: UNIMPLEMENTED();
+ }
+
+ // Handle the writeback for loads after the load to ensure a safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load, so pop(fp) will never break the invariant
+ // sp <= fp that the sampler expects while walking the stack.
+ if (instr->IsLoad()) {
+ // For a load, the address before writeback is used to check for accesses
+ // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned addr_reg = instr->Rn();
+ int offset = instr->ImmLSPair() << instr->SizeLSPair();
+ uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
+ uint8_t* stack = NULL;
+
+ // Handle the writeback for stores before the store. On a CPU the writeback
+ // and the store are atomic, but when running on the simulator it is possible
+ // to be interrupted between them. The simulator is not thread safe and V8
+ // does not require it to be to run JavaScript, so the profiler may sample
+ // the "simulated" CPU in the middle of a load/store with writeback. The code
+ // below ensures that push operations are safe even when interrupted: the
+ // stack pointer will be decremented before the element is added to the stack.
+ if (instr->IsStore()) {
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+
+ // For a store, the address after writeback is used to check for accesses
+ // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+ }
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ case LDP_w: {
+ set_wreg(rt, MemoryRead32(address));
+ set_wreg(rt2, MemoryRead32(address + kWRegSize));
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, MemoryReadFP32(address));
+ set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, MemoryRead64(address));
+ set_xreg(rt2, MemoryRead64(address + kXRegSize));
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, MemoryReadFP64(address));
+ set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW));
+ set_xreg(rt2, ExtendValue(kXRegSizeInBits,
+ MemoryRead32(address + kWRegSize), SXTW));
+ break;
+ }
+ case STP_w: {
+ MemoryWrite32(address, wreg(rt));
+ MemoryWrite32(address + kWRegSize, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ MemoryWriteFP32(address, sreg(rt));
+ MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ MemoryWrite64(address, xreg(rt));
+ MemoryWrite64(address + kXRegSize, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ MemoryWriteFP64(address, dreg(rt));
+ MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ // Handle the writeback for loads after the load to ensure a safe pop
+ // operation even when interrupted in the middle of it. The stack pointer
+ // is only updated after the load, so pop(fp) will never break the invariant
+ // sp <= fp that the sampler expects while walking the stack.
+ if (instr->IsLoad()) {
+ // For a load, the address before writeback is used to check for accesses
+ // below the stack.
+ stack = reinterpret_cast<uint8_t*>(sp());
+
+ LoadStoreWriteBack(addr_reg, offset, addrmode);
+ }
+
+ // Accesses below the stack pointer (but above the platform stack limit) are
+ // not allowed in the ABI.
+ CheckMemoryAccess(address, stack);
+}
+
+
+void Simulator::VisitLoadLiteral(Instruction* instr) {
+ uint8_t* address = instr->LiteralAddress();
+ unsigned rt = instr->Rt();
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break;
+ case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break;
+ case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break;
+ case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
+ int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ FATAL("ALIGNMENT EXCEPTION");
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ return reinterpret_cast<uint8_t*>(address);
+}
+
+
+void Simulator::LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ ASSERT(offset != 0);
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+ set_reg(addr_reg, address + offset, Reg31IsStackPointer);
+ }
+}
+
+
+void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+ if ((address >= stack_limit_) && (address < stack)) {
+ fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
+ fprintf(stream_, " sp is here: 0x%16p\n", stack);
+ fprintf(stream_, " access was here: 0x%16p\n", address);
+ fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_);
+ fprintf(stream_, "\n");
+ FATAL("ACCESS BELOW STACK POINTER");
+ }
+}
+
+
+uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+ uint64_t read = 0;
+ memcpy(&read, address, num_bytes);
+ return read;
+}
+
+
+uint8_t Simulator::MemoryRead8(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint8_t));
+}
+
+
+uint16_t Simulator::MemoryRead16(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint16_t));
+}
+
+
+uint32_t Simulator::MemoryRead32(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint32_t));
+}
+
+
+float Simulator::MemoryReadFP32(uint8_t* address) {
+ return rawbits_to_float(MemoryRead32(address));
+}
+
+
+uint64_t Simulator::MemoryRead64(uint8_t* address) {
+ return MemoryRead(address, sizeof(uint64_t));
+}
+
+
+double Simulator::MemoryReadFP64(uint8_t* address) {
+ return rawbits_to_double(MemoryRead64(address));
+}
+
+
+void Simulator::MemoryWrite(uint8_t* address,
+ uint64_t value,
+ unsigned num_bytes) {
+ ASSERT(address != NULL);
+ ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
+
+ LogWrite(address, value, num_bytes);
+ memcpy(address, &value, num_bytes);
+}
+
+
+void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
+ MemoryWrite(address, value, sizeof(uint32_t));
+}
+
+
+void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
+ MemoryWrite32(address, float_to_rawbits(value));
+}
+
+
+void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
+ MemoryWrite(address, value, sizeof(uint64_t));
+}
+
+
+void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
+ MemoryWrite64(address, double_to_rawbits(value));
+}
+
+
+void Simulator::VisitMoveWideImmediate(Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
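+
+// As an illustration of the three move-wide variants handled above, assume x0
+// initially holds 0xaaaabbbbccccdddd; then:
+// MOVZ x0, #0x1234, lsl #16 gives x0 = 0x0000000012340000
+// MOVK x0, #0x1234, lsl #16 gives x0 = 0xaaaabbbb1234dddd
+// MOVN x0, #0x1234, lsl #16 gives x0 = 0xffffffffedcbffff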
+
+
+void Simulator::VisitConditionalSelect(Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
+ break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
+ break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
+ ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
+ uint64_t result = 0;
+ for (unsigned i = 0; i < num_bits; i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
+ // Split the 64-bit value into an array of eight bytes, where bytes[0] is the
+ // least significant byte and bytes[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = 0xff00000000000000UL;
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (value & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[Reverse16] is used by REV16_x, REV16_w
+ // permute_table[Reverse32] is used by REV32_x, REV_w
+ // permute_table[Reverse64] is used by REV_x
+ ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+ static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7} };
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ result <<= 8;
+ result |= bytes[permute_table[mode][i]];
+ }
+ return result;
+}
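+
+// For example, applying ReverseBytes() to value = 0x0102030405060708:
+// Reverse16 (REV16) swaps the bytes in each halfword -> 0x0201040306050807
+// Reverse32 (REV_w, REV32_x) swaps the bytes in each word -> 0x0403020108070605
+// Reverse64 (REV_x) reverses all eight bytes -> 0x0807060504030201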
+
+
+void Simulator::VisitDataProcessing2Source(Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ default: UNIMPLEMENTED();
+ }
+
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ if (shift_op != NO_SHIFT) {
+ // The shift distance is encoded in the least-significant five (W) or six
+ // (X) bits of the register.
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xffffffffL;
+ u1 = u >> 32;
+ v0 = v & 0xffffffffL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffffL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
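+
+// MultiplyHighSigned() splits each operand into a signed high half and an
+// unsigned low half (u == u1 * 2^32 + u0), forms the partial products and
+// propagates their carries so that the return value is the top 64 bits of the
+// full 128-bit product. For example, the high 64 bits of 2^62 * 4 are exactly
+// 1, and the high 64 bits of -1 * 1 are all ones (-1).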
+
+
+void Simulator::VisitDataProcessing3Source(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case SMULH_x:
+ ASSERT(instr->Ra() == kZeroRegCode);
+ result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
+ if (diff >= 0) {
+ mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = ((1L << (S + 1)) - 1);
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+ // inzero indicates whether the extracted bitfield is inserted into the
+ // existing destination register value or into zero.
+ // If extend is true, the sign of the extracted bitfield is extended.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((1L << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitExtract(Instruction* instr) {
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
+ : kWRegSizeInBits;
+ set_reg(reg_size,
+ instr->Rd(),
+ (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) |
+ (reg(reg_size, instr->Rn()) << (reg_size - lsb)));
+}
+
+
+void Simulator::VisitFPImmediate(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = fpcr().RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(reg<uint32_t>(src), fbits, round));
+ break;
+ }
+ default: UNREACHABLE();
+ }
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kXMaxUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
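+
+// The four conversion helpers above saturate out-of-range inputs to the limits
+// of the destination type and convert NaN to 0, as the corresponding AArch64
+// convert-to-integer instructions do. For example, FPToInt32(3.0e9, FPZero)
+// returns kWMaxInt, FPToUInt32(-1.5, FPZero) returns 0, and a NaN input
+// returns 0 from all four helpers.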
+
+
+void Simulator::VisitFPCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ double fn_val = fpreg(reg_size, instr->Rn());
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s:
+ case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break;
+ case FCMP_s_zero:
+ case FCMP_d_zero: FPCompare(fn_val, 0.0); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: {
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ // If the condition passes, set the status flags to the result of
+ // comparing the operands.
+ unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits
+ : kSRegSizeInBits;
+ FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm()));
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ }
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); break;
+ case FMOV_d: set_dreg(fd, dreg(fn)); break;
+ case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break;
+ case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break;
+ case FNEG_s: set_sreg(fd, -sreg(fn)); break;
+ case FNEG_d: set_dreg(fd, -dreg(fn)); break;
+ case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break;
+ case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
+ case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
+ case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+ case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
+ case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
+ case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
+ case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break;
+ case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break;
+ case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
+ FPRounding round_mode) {
+ ASSERT((sign == 0) || (sign == 1));
+
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+ //               These bits fit into the result.
+ //              |---------------------------|
+ // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ //                                          ||
+ //                                         / |
+ //                                        /  halfbit
+ //                                    onebit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+ // In each row below, the first column is the input fraction ('frac': its
+ // onebit is the digit just before the point and its halfbit the digit just
+ // after it), the second column is the adjusted fraction (whose halfbit
+ // decides the final increment), and the third column is the rounded result:
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+ //
+ // where:
+ //
+ // adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
+ //
+ // mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return sign << sign_offset;
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: The input is too large for the result type to represent. The
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset);
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ // The result will always be +/-0.0.
+ return sign << sign_offset;
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(1UL << highest_significant_bit);
+ }
+
+ if (shift > 0) {
+ // We have to shift the mantissa to the right. Some precision is lost, so we
+ // need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
+ uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
+ T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
+
+ T result = (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset);
+
+ // A very large mantissa can overflow during rounding. If this happens, the
+ // exponent should be incremented and the mantissa set to 1.0 (encoded as
+ // 0). Applying halfbit_adjusted after assembling the float has the nice
+ // side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return (sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset);
+ }
+}
+
+
+// See FPRound for a description of this function.
+static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int64_t bits =
+ FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_double(bits);
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
+ uint64_t mantissa, FPRounding round_mode) {
+ int32_t bits =
+ FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return rawbits_to_float(bits);
+}
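+
+// As a worked example of FPRound: the value 1.5 has sign 0, unbiased exponent
+// 0 and mantissa 0b11 (the top bit is worth 2^0, the next bit 2^-1), so
+// FPRoundToFloat(0, 0, 3, FPTieEven) assembles the raw bits 0x3fc00000,
+// which is 1.5f.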
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
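+
+// For example, with fbits == 4 the input is interpreted as a fixed-point value
+// scaled by 2^4, so UFixedToDouble(16, 4, FPTieEven) returns 1.0 and
+// UFixedToDouble(24, 4, FPTieEven) returns 1.5.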
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is non-negative, round up (ties round away from zero).
+ if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ if ((error > 0.5) ||
+ ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+ // If value > 0 then we take floor(value); otherwise we take ceil(value),
+ // so the result is always rounded towards zero.
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ default: UNIMPLEMENTED();
+ }
+ return int_result;
+}
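+
+// For example, FPRoundInt() rounds 2.5 to 3.0 with FPTieAway and to 2.0 with
+// FPTieEven, FPZero and FPNegativeInfinity; it rounds -2.5 to -3.0 with
+// FPTieAway and FPNegativeInfinity, and to -2.0 with FPTieEven and FPZero.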
+
+
+double Simulator::FPToDouble(float value) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = float_to_rawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = unsigned_bitextract_64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (1L << 51); // Force a quiet NaN.
+
+ return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ UNREACHABLE();
+ return static_cast<double>(value);
+}
+
+
+float Simulator::FPToFloat(double value, FPRounding round_mode) {
+ // Only the FPTieEven rounding mode is implemented.
+ ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (fpcr().DN()) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = double_to_rawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = double_to_rawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (1UL << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ UNREACHABLE();
+ return value;
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+
+ // Fmaxnm and Fminnm have special NaN handling.
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return;
+ case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return;
+ case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return;
+ case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return;
+ default:
+ break; // Fall through.
+ }
+
+ if (FPProcessNaNs(instr)) return;
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break;
+ case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break;
+ case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break;
+ case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break;
+ case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break;
+ case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break;
+ case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break;
+ case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break;
+ case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break;
+ case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break;
+ case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break;
+ case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break;
+ case FMAXNM_s:
+ case FMAXNM_d:
+ case FMINNM_s:
+ case FMINNM_d:
+ // These were handled before the standard FPProcessNaNs() stage.
+ UNREACHABLE();
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: UNIMPLEMENTED();
+ }
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(a) && !std::isnan(b));
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ return FPDefaultNaN<T>();
+ } else {
+ return std::sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaN(T op) {
+ ASSERT(std::isnan(op));
+ return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+}
+
+
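+// Instruction-level NaN processing for two-operand FP instructions: if either
+// source register holds a NaN, write the processed NaN to the destination
+// register and return true so the caller can skip the arithmetic.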
+bool Simulator::FPProcessNaNs(Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::VisitSystem(Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
+ case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+ default: UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ default: UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ __sync_synchronize();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
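+// Parse a debugger value: a register name recognized by CodeFromName, a
+// hexadecimal literal prefixed with "0x", or a plain decimal literal.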
+bool Simulator::GetValue(const char* desc, int64_t* value) {
+ int regnum = CodeFromName(desc);
+ if (regnum >= 0) {
+ unsigned code = regnum;
+ if (code == kZeroRegCode) {
+ // Catch the zero register and return 0.
+ *value = 0;
+ return true;
+ } else if (code == kSPRegInternalCode) {
+ // Translate the stack pointer code to 31, for Reg31IsStackPointer.
+ code = 31;
+ }
+ if (desc[0] == 'w') {
+ *value = wreg(code, Reg31IsStackPointer);
+ } else {
+ *value = xreg(code, Reg31IsStackPointer);
+ }
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ } else {
+ return SScanF(desc, "%" SCNu64,
+ reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
+
+
+bool Simulator::PrintValue(const char* desc) {
+ if (strcmp(desc, "csp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s csp:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ } else if (strcmp(desc, "wcsp") == 0) {
+ ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+ PrintF("%s wcsp:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
+ return true;
+ }
+
+ int i = CodeFromName(desc);
+ STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters);
+ if (i < 0 || static_cast<unsigned>(i) >= kNumberOfFPRegisters) return false;
+
+ if (desc[0] == 'v') {
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
+ clr_fpreg_name, VRegNameForCode(i),
+ clr_fpreg_value, double_to_rawbits(dreg(i)),
+ clr_normal,
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'd') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, DRegNameForCode(i),
+ clr_fpreg_value, dreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 's') {
+ PrintF("%s %s:%s %g%s\n",
+ clr_fpreg_name, SRegNameForCode(i),
+ clr_fpreg_value, sreg(i),
+ clr_normal);
+ return true;
+ } else if (desc[0] == 'w') {
+ PrintF("%s %s:%s 0x%08" PRIx32 "%s\n",
+ clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+ return true;
+ } else {
+    // X register names begin with a wide variety of characters, so anything
+    // that was not matched above is treated as an X register.
+ PrintF("%s %s:%s 0x%016" PRIx64 "%s\n",
+ clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+ return true;
+ }
+}
+
+
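+// Interactive debugger shell: reads commands at the "sim> " prompt until the
+// user resumes execution. See the 'help' command below for the command set.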
+void Simulator::Debug() {
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ bool done = false;
+ bool cleared_log_disasm_bit = false;
+
+ while (!done) {
+ // Disassemble the next instruction to execute before doing anything else.
+ PrintInstructionsAt(pc_, 1);
+ // Read the command line.
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Repeat last command by default.
+ char* last_input = last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
+ DeleteArray(line);
+ line = last_input;
+ } else {
+        // Update the latest command run.
+ set_last_debugger_input(line);
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+
+ // stepi / si ------------------------------------------------------------
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        // We are about to execute instructions, after which by default the
+        // pc_ should be incremented. If pc_modified_ was set when we reached
+        // this debug instruction, it has not been cleared yet because the
+        // instruction has not completed; clear it manually here.
+ pc_modified_ = false;
+
+ if (argc == 1) {
+ ExecuteInstruction();
+ } else {
+ int64_t number_of_instructions_to_execute = 1;
+ GetValue(arg1, &number_of_instructions_to_execute);
+
+ set_log_parameters(log_parameters() | LOG_DISASM);
+ while (number_of_instructions_to_execute-- > 0) {
+ ExecuteInstruction();
+ }
+ set_log_parameters(log_parameters() & ~LOG_DISASM);
+ PrintF("\n");
+ }
+
+ // If it was necessary, the pc has already been updated or incremented
+ // when executing the instruction. So we do not want it to be updated
+ // again. It will be cleared when exiting.
+ pc_modified_ = true;
+
+ // next / n --------------------------------------------------------------
+ } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
+ // Tell the simulator to break after the next executed BL.
+ break_on_next_ = true;
+ // Continue.
+ done = true;
+
+ // continue / cont / c ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) ||
+ (strcmp(cmd, "cont") == 0) ||
+ (strcmp(cmd, "c") == 0)) {
+ // Leave the debugger shell.
+ done = true;
+
+ // disassemble / disasm / di ---------------------------------------------
+ } else if (strcmp(cmd, "disassemble") == 0 ||
+ strcmp(cmd, "disasm") == 0 ||
+ strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
+ int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
+ if (argc >= 2) { // disasm <n of instrs>
+ GetValue(arg1, &n_of_instrs_to_disasm);
+ }
+ if (argc >= 3) { // disasm <n of instrs> <address>
+ GetValue(arg2, &address);
+ }
+
+ // Disassemble.
+ PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
+ n_of_instrs_to_disasm);
+ PrintF("\n");
+
+ // print / p -------------------------------------------------------------
+ } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
+ if (argc == 2) {
+ if (strcmp(arg1, "all") == 0) {
+ PrintRegisters(true);
+ PrintFPRegisters(true);
+ } else {
+ if (!PrintValue(arg1)) {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ PrintF(
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
+ }
+
+ // printobject / po ------------------------------------------------------
+ } else if ((strcmp(cmd, "printobject") == 0) ||
+ (strcmp(cmd, "po") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ PrintF("%s: \n", arg1);
+#ifdef DEBUG
+ obj->PrintLn();
+#else
+ obj->ShortPrint();
+ PrintF("\n");
+#endif
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
+ }
+
+ // stack / mem ----------------------------------------------------------
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = NULL;
+ int64_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(jssp());
+
+ } else { // "mem"
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words = 0;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ PrintF("%s unrecognized\n", argv[next_arg]);
+ PrintF("Printing 10 double words by default");
+ words = 10;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ end = cur + words;
+
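+          // Annotate each word that is a smi or points into the V8 heap with
+          // the untagged smi value or a short description of the object.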
+ while (cur < end) {
+ PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
+ reinterpret_cast<uint64_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int64_t value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ PrintF(" (");
+ if ((value & kSmiTagMask) == 0) {
+ STATIC_ASSERT(kSmiValueSize == 32);
+ int32_t untagged = (value >> kSmiShift) & 0xffffffff;
+ PrintF("smi %" PRId32, untagged);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ // trace / t -------------------------------------------------------------
+ } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
+ if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
+ (LOG_DISASM | LOG_REGS)) {
+ PrintF("Enabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
+ } else {
+ PrintF("Disabling disassembly and registers tracing\n");
+ set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
+ }
+
+ // break / b -------------------------------------------------------------
+ } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ SetBreakpoint(reinterpret_cast<Instruction*>(value));
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ ListBreakpoints();
+ PrintF("Use `break <address>` to set or disable a breakpoint\n");
+ }
+
+ // gdb -------------------------------------------------------------------
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("Relinquishing control to gdb.\n");
+ OS::DebugBreak();
+ PrintF("Regaining control from gdb.\n");
+
+ // sysregs ---------------------------------------------------------------
+ } else if (strcmp(cmd, "sysregs") == 0) {
+ PrintSystemRegisters();
+
+ // help / h --------------------------------------------------------------
+ } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
+ PrintF(
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+ " By default <n> is 20 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+ "stack\n"
+ " stack [<words>]\n"
+ " Dump stack content, default dump 10 words\n"
+ "mem\n"
+ " mem <address> [<words>]\n"
+ " Dump memory content, default dump 10 words\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ PrintF("Use 'help' for more information.\n");
+ }
+ }
+    if (cleared_log_disasm_bit) {
+ set_log_parameters(log_parameters_ | LOG_DISASM);
+ }
+ }
+}
+
+
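+// HLT instructions are used as pseudo-instructions by the simulator: they
+// encode debug points, redirected runtime calls, simulated printf and
+// unreachable markers, with their arguments inlined in the instruction stream.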
+void Simulator::VisitException(Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: {
+ if (instr->ImmException() == kImmExceptionIsDebug) {
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t code;
+ uint32_t parameters;
+
+ memcpy(&code,
+ pc_->InstructionAtOffset(kDebugCodeOffset),
+ sizeof(code));
+ memcpy(&parameters,
+ pc_->InstructionAtOffset(kDebugParamsOffset),
+ sizeof(parameters));
+ char const *message =
+ reinterpret_cast<char const*>(
+ pc_->InstructionAtOffset(kDebugMessageOffset));
+
+ // Always print something when we hit a debug point that breaks.
+ // We are going to break, so printing something is not an issue in
+ // terms of speed.
+ if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
+ if (message != NULL) {
+ PrintF("%sDebugger hit %d: %s%s%s\n",
+ clr_debug_number,
+ code,
+ clr_debug_message,
+ message,
+ clr_normal);
+ } else {
+ PrintF("%sDebugger hit %d.%s\n",
+ clr_debug_number,
+ code,
+ clr_normal);
+ }
+ }
+
+ // Other options.
+ switch (parameters & kDebuggerTracingDirectivesMask) {
+ case TRACE_ENABLE:
+ set_log_parameters(log_parameters() | parameters);
+ if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
+ if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_FP_REGS) { PrintFPRegisters(); }
+ break;
+ case TRACE_DISABLE:
+ set_log_parameters(log_parameters() & ~parameters);
+ break;
+ case TRACE_OVERRIDE:
+ set_log_parameters(parameters);
+ break;
+ default:
+ // We don't support a one-shot LOG_DISASM.
+ ASSERT((parameters & LOG_DISASM) == 0);
+ // Don't print information that is already being traced.
+ parameters &= ~log_parameters();
+ // Print the requested information.
+ if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
+ if (parameters & LOG_REGS) PrintRegisters(true);
+ if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+ }
+
+      // The debug parameters are inlined in the code. Skip them:
+ // - Skip to the end of the message string.
+ size_t size = kDebugMessageOffset + strlen(message) + 1;
+ pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
+ // - Verify that the unreachable marker is present.
+ ASSERT(pc_->Mask(ExceptionMask) == HLT);
+ ASSERT(pc_->ImmException() == kImmExceptionIsUnreachable);
+ // - Skip past the unreachable marker.
+ set_pc(pc_->following());
+
+ // Check if the debugger should break.
+ if (parameters & BREAK) Debug();
+
+ } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
+ DoRuntimeCall(instr);
+ } else if (instr->ImmException() == kImmExceptionIsPrintf) {
+ // Read the argument encoded inline in the instruction stream.
+ uint32_t type;
+ memcpy(&type,
+ pc_->InstructionAtOffset(kPrintfTypeOffset),
+ sizeof(type));
+
+ const char* format = reg<const char*>(0);
+
+ // Pass all of the relevant PCS registers onto printf. It doesn't
+ // matter if we pass too many as the extra ones won't be read.
+ int result;
+ fputs(clr_printf, stream_);
+ if (type == CPURegister::kRegister) {
+ result = fprintf(stream_, format,
+ xreg(1), xreg(2), xreg(3), xreg(4),
+ xreg(5), xreg(6), xreg(7));
+ } else if (type == CPURegister::kFPRegister) {
+ result = fprintf(stream_, format,
+ dreg(0), dreg(1), dreg(2), dreg(3),
+ dreg(4), dreg(5), dreg(6), dreg(7));
+ } else {
+ ASSERT(type == CPURegister::kNoRegister);
+ result = fprintf(stream_, "%s", format);
+ }
+ fputs(clr_normal, stream_);
+
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(pc_->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
+ fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
+ reinterpret_cast<void*>(pc_));
+ abort();
+
+ } else {
+ OS::DebugBreak();
+ }
+ break;
+ }
+
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+#endif // USE_SIMULATOR
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
new file mode 100644
index 000000000..6a7353b46
--- /dev/null
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -0,0 +1,908 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_SIMULATOR_ARM64_H_
+#define V8_ARM64_SIMULATOR_ARM64_H_
+
+#include <stdarg.h>
+#include <vector>
+
+#include "v8.h"
+
+#include "globals.h"
+#include "utils.h"
+#include "allocation.h"
+#include "assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/decoder-arm64.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/instrument-arm64.h"
+
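+// Expands the macro R once for each of the 32 general register codes.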
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+#if !defined(USE_SIMULATOR)
+
+// Running without a simulator on a native ARM64 platform.
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ (entry(p0, p1, p2, p3, p4))
+
+typedef int (*arm64_regexp_matcher)(String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm64_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+// Running without a simulator there is nothing to do.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ USE(isolate);
+ return c_limit;
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static void UnregisterCTryCatch() { }
+};
+
+#else // !defined(USE_SIMULATOR)
+
+enum ReverseByteMode {
+ Reverse16 = 0,
+ Reverse32 = 1,
+ Reverse64 = 2
+};
+
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return unsigned_bitextract_32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return signed_bitextract_32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
+ Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
+ void Set##Name(Type bits) { \
+ SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
+ }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ template<typename T>
+ void Set(T new_value, unsigned size = sizeof(T)) {
+ ASSERT(size <= kSizeInBytes);
+ ASSERT(size <= sizeof(new_value));
+    // All AArch64 registers are zero-extending: writing a W register clears
+    // the top bits of the corresponding X register.
+ memset(value_, 0, kSizeInBytes);
+ memcpy(value_, &new_value, size);
+ }
+
+ // Copy 'size' bytes of the register to the result, and zero-extend to fill
+ // the result.
+ template<typename T>
+ T Get(unsigned size = sizeof(T)) const {
+ ASSERT(size <= kSizeInBytes);
+ T result;
+ memset(&result, 0, sizeof(result));
+ memcpy(&result, value_, size);
+ return result;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+};
+typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
+typedef SimRegisterBase<kDRegSize> SimFPRegister; // v0-v31
+
+
+class Simulator : public DecoderVisitor {
+ public:
+ explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
+ Isolate* isolate = NULL,
+ FILE* stream = stderr);
+ Simulator();
+ ~Simulator();
+
+ // System functions.
+
+ static void Initialize(Isolate* isolate);
+
+ static Simulator* current(v8::internal::Isolate* isolate);
+
+ class CallArgument;
+
+ // Call an arbitrary function taking an arbitrary number of arguments. The
+ // varargs list must be a set of arguments with type CallArgument, and
+ // terminated by CallArgument::End().
+ void CallVoid(byte* entry, CallArgument* args);
+
+ // Like CallVoid, but expect a return value.
+ int64_t CallInt64(byte* entry, CallArgument* args);
+ double CallDouble(byte* entry, CallArgument* args);
+
+ // V8 calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 10 parameters. These are convenience functions,
+ // which set up the simulator state and grab the result on return.
+ int64_t CallJS(byte* entry,
+ byte* function_entry,
+ JSFunction* func,
+                 Object* recv,
+ int64_t argc,
+ Object*** argv);
+ int64_t CallRegExp(byte* entry,
+ String* input,
+ int64_t start_offset,
+ const byte* input_start,
+ const byte* input_end,
+ int* output,
+ int64_t output_size,
+ Address stack_base,
+ int64_t direct_call,
+ void* return_address,
+ Isolate* isolate);
+
+ // A wrapper class that stores an argument for one of the above Call
+ // functions.
+ //
+ // Only arguments up to 64 bits in size are supported.
+ class CallArgument {
+ public:
+ template<typename T>
+ explicit CallArgument(T argument) {
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = X_ARG;
+ }
+
+ explicit CallArgument(double argument) {
+ ASSERT(sizeof(argument) == sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ explicit CallArgument(float argument) {
+ // TODO(all): CallArgument(float) is untested, remove this check once
+ // tested.
+ UNIMPLEMENTED();
+ // Make the D register a NaN to try to trap errors if the callee expects a
+ // double. If it expects a float, the callee should ignore the top word.
+ ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+ memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
+ // Write the float payload to the S register.
+ ASSERT(sizeof(argument) <= sizeof(bits_));
+ memcpy(&bits_, &argument, sizeof(argument));
+ type_ = D_ARG;
+ }
+
+ // This indicates the end of the arguments list, so that CallArgument
+ // objects can be passed into varargs functions.
+ static CallArgument End() { return CallArgument(); }
+
+ int64_t bits() const { return bits_; }
+ bool IsEnd() const { return type_ == NO_ARG; }
+ bool IsX() const { return type_ == X_ARG; }
+ bool IsD() const { return type_ == D_ARG; }
+
+ private:
+ enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
+
+ // All arguments are aligned to at least 64 bits and we don't support
+ // passing bigger arguments, so the payload size can be fixed at 64 bits.
+ int64_t bits_;
+ CallArgumentType type_;
+
+ CallArgument() { type_ = NO_ARG; }
+ };
+
+
+ // Start the debugging command line.
+ void Debug();
+
+ bool GetValue(const char* desc, int64_t* value);
+
+ bool PrintValue(const char* desc);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ void ResetState();
+
+ // Runtime call support.
+ static void* RedirectExternalReference(void* external_function,
+ ExternalReference::Type type);
+ void DoRuntimeCall(Instruction* instr);
+
+ // Run the simulator.
+ static const Instruction* kEndOfSimAddress;
+ void DecodeInstruction();
+ void Run();
+ void RunFrom(Instruction* start);
+
+ // Simulation helpers.
+ template <typename T>
+ void set_pc(T new_pc) {
+ ASSERT(sizeof(T) == sizeof(pc_));
+ memcpy(&pc_, &new_pc, sizeof(T));
+ pc_modified_ = true;
+ }
+ Instruction* pc() { return pc_; }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->following();
+ }
+
+ pc_modified_ = false;
+ }
+
+ virtual void Decode(Instruction* instr) {
+ decoder_->Decode(instr);
+ }
+
+ void ExecuteInstruction() {
+ ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ CheckBreakNext();
+ Decode(pc_);
+ LogProcessorState();
+ increment_pc();
+ CheckBreakpoints();
+ }
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ // Register accessors.
+
+ // Return 'size' bits of the value of an integer register, as the specified
+ // type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kXRegSizeInBits and
+ // kWRegSizeInBits.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like reg(), but infer the access size from the template type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<T>(sizeof(T) * 8, code, r31mode);
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
+
+ // Write 'size' bits of 'value' into an integer register. The value is
+ // zero-extended. This behaviour matches AArch64 register writes.
+ //
+ // The only supported values of 'size' are kXRegSizeInBits and
+ // kWRegSizeInBits.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kXRegSizeInBits) || (size == kWRegSizeInBits));
+ ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+ return registers_[code].Set(value, size_in_bytes);
+ }
+
+ // Like set_reg(), but infer the access size from the template type.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(sizeof(value) * 8, code, value, r31mode);
+ }
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kWRegSizeInBits, code, value, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(kXRegSizeInBits, code, value, r31mode);
+ }
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ ASSERT(sizeof(T) == kPointerSize);
+ set_reg(31, value, Reg31IsStackPointer);
+ }
+
+ int64_t sp() { return xreg(31, Reg31IsStackPointer); }
+ int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
+ int64_t fp() {
+ return xreg(kFramePointerRegCode, Reg31IsStackPointer);
+ }
+ Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
+
+ Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+
+ // Return 'size' bits of the value of a floating-point register, as the
+ // specified type. The value is zero-extended to fill the result.
+ //
+ // The only supported values of 'size' are kDRegSizeInBits and
+ // kSRegSizeInBits.
+ template<typename T>
+ T fpreg(unsigned size, unsigned code) const {
+ unsigned size_in_bytes = size / 8;
+ ASSERT(size_in_bytes <= sizeof(T));
+ ASSERT((size == kDRegSizeInBits) || (size == kSRegSizeInBits));
+ ASSERT(code < kNumberOfFPRegisters);
+ return fpregisters_[code].Get<T>(size_in_bytes);
+ }
+
+ // Like fpreg(), but infer the access size from the template type.
+ template<typename T>
+ T fpreg(unsigned code) const {
+ return fpreg<T>(sizeof(T) * 8, code);
+ }
+
+ // Common specialized accessors for the fpreg() template.
+ float sreg(unsigned code) const {
+ return fpreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return fpreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return fpreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return fpreg<uint64_t>(code);
+ }
+
+ double fpreg(unsigned size, unsigned code) const {
+ switch (size) {
+ case kSRegSizeInBits: return sreg(code);
+ case kDRegSizeInBits: return dreg(code);
+ default:
+ UNREACHABLE();
+ return 0.0;
+ }
+ }
+
+ // Write 'value' into a floating-point register. The value is zero-extended.
+ // This behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_fpreg(unsigned code, T value) {
+ ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+ ASSERT(code < kNumberOfFPRegisters);
+ fpregisters_[code].Set(value, sizeof(value));
+ }
+
+ // Common specialized accessors for the set_fpreg() template.
+ void set_sreg(unsigned code, float value) {
+ set_fpreg(code, value);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg(unsigned code, double value) {
+ set_fpreg(code, value);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value) {
+ set_fpreg(code, value);
+ }
+
+ SimSystemRegister& nzcv() { return nzcv_; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ // Debug helpers
+
+ // Simulator breakpoints.
+ struct Breakpoint {
+ Instruction* location;
+ bool enabled;
+ };
+ std::vector<Breakpoint> breakpoints_;
+ void SetBreakpoint(Instruction* breakpoint);
+ void ListBreakpoints();
+ void CheckBreakpoints();
+
+ // Helpers for the 'next' command.
+ // When this is set, the Simulator will insert a breakpoint after the next BL
+ // instruction it meets.
+ bool break_on_next_;
+ // Check if the Simulator should insert a break after the current instruction
+ // for the 'next' command.
+ void CheckBreakNext();
+
+  // Disassemble 'count' instructions starting at the given address.
+ void PrintInstructionsAt(Instruction* pc, uint64_t count);
+
+ void PrintSystemRegisters(bool print_all = false);
+ void PrintRegisters(bool print_all_regs = false);
+ void PrintFPRegisters(bool print_all_regs = false);
+ void PrintProcessorState();
+ void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void LogSystemRegisters() {
+ if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+ }
+ void LogRegisters() {
+ if (log_parameters_ & LOG_REGS) PrintRegisters();
+ }
+ void LogFPRegisters() {
+ if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+ }
+ void LogProcessorState() {
+ LogSystemRegisters();
+ LogRegisters();
+ LogFPRegisters();
+ }
+ void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
+ if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+ }
+
+ int log_parameters() { return log_parameters_; }
+ void set_log_parameters(int new_parameters) {
+ log_parameters_ = new_parameters;
+ if (!decoder_) {
+ if (new_parameters & LOG_DISASM) {
+ PrintF("Run --debug-sim to dynamically turn on disassembler\n");
+ }
+ return;
+ }
+ if (new_parameters & LOG_DISASM) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
+
+ static inline const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* SRegNameForCode(unsigned code);
+ static inline const char* DRegNameForCode(unsigned code);
+ static inline const char* VRegNameForCode(unsigned code);
+ static inline int CodeFromName(const char* name);
+
+ protected:
+ // Simulation helpers ------------------------------------
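+  // Evaluate an AArch64 condition code against the current NZCV flags.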
+ bool ConditionPassed(Condition cond) {
+ SimSystemRegister& flags = nzcv();
+ switch (cond) {
+ case eq:
+ return flags.Z();
+ case ne:
+ return !flags.Z();
+ case hs:
+ return flags.C();
+ case lo:
+ return !flags.C();
+ case mi:
+ return flags.N();
+ case pl:
+ return !flags.N();
+ case vs:
+ return flags.V();
+ case vc:
+ return !flags.V();
+ case hi:
+ return flags.C() && !flags.Z();
+ case ls:
+ return !(flags.C() && !flags.Z());
+ case ge:
+ return flags.N() == flags.V();
+ case lt:
+ return flags.N() != flags.V();
+ case gt:
+ return !flags.Z() && (flags.N() == flags.V());
+ case le:
+ return !(!flags.Z() && (flags.N() == flags.V()));
+ case nv: // Fall through.
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(Instruction* instr, int64_t op2);
+ int64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ int64_t src1,
+ int64_t src2,
+ int64_t carry_in = 0);
+ void LogicalHelper(Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(Instruction* instr, int64_t op2);
+ void LoadStoreHelper(Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
+ uint8_t* LoadStoreAddress(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+
+ uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
+ uint8_t MemoryRead8(uint8_t* address);
+ uint16_t MemoryRead16(uint8_t* address);
+ uint32_t MemoryRead32(uint8_t* address);
+ float MemoryReadFP32(uint8_t* address);
+ uint64_t MemoryRead64(uint8_t* address);
+ double MemoryReadFP64(uint8_t* address);
+
+ void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+ void MemoryWrite32(uint8_t* address, uint32_t value);
+ void MemoryWriteFP32(uint8_t* address, float value);
+ void MemoryWrite64(uint8_t* address, uint64_t value);
+ void MemoryWriteFP64(uint8_t* address, double value);
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+
+ uint64_t ReverseBits(uint64_t value, unsigned num_bits);
+ uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
+
+ template <typename T>
+ T FPDefaultNaN() const;
+
+ void FPCompare(double val0, double val1);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double FPToDouble(float value);
+ float FPToFloat(double value, FPRounding round_mode);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op);
+
+ bool FPProcessNaNs(Instruction* instr);
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2);
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3);
+
+ void CheckStackAlignment();
+
+ inline void CheckPCSComplianceAndRun();
+
+#ifdef DEBUG
+ // Corruption values should have their least significant byte cleared to
+ // allow the code of the register being corrupted to be inserted.
+ static const uint64_t kCallerSavedRegisterCorruptionValue =
+ 0xca11edc0de000000UL;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t kCallerSavedFPRegisterCorruptionValue =
+ 0x7ff000007f801000UL;
+ // This value is a mix of 32/64-bits NaN and "verbose" immediate.
+ static const uint64_t kDefaultCPURegisterCorruptionValue =
+ 0x7ffbad007f8bad00UL;
+
+ void CorruptRegisters(CPURegList* list,
+ uint64_t value = kDefaultCPURegisterCorruptionValue);
+ void CorruptAllCallerSavedCPURegisters();
+#endif
+
+ // Processor state ---------------------------------------
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instrumentation.
+ Instrument* instrument_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Floating point registers
+ SimFPRegister fpregisters_[kNumberOfFPRegisters];
+
+ // Processor state
+ // bits[31, 27]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features are supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return result == 0;
+ }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const intptr_t stack_protection_size_ = KB;
+ intptr_t stack_size_;
+ byte* stack_limit_;
+
+ Decoder<DispatchingDecoderVisitor>* decoder_;
+ Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
+
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ // Debugger input.
+ void set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+ }
+ char* last_debugger_input() { return last_debugger_input_; }
+ char* last_debugger_input_;
+
+ private:
+ void Init(FILE* stream);
+
+ int log_parameters_;
+ Isolate* isolate_;
+};
+
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
+ FUNCTION_ADDR(entry), \
+ p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+ Simulator::current(Isolate::Current())->CallRegExp( \
+ entry, \
+ p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+// See also 'class SimulatorStack' in arm/simulator-arm.h.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+ static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
+ }
+
+ static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(Isolate::Current());
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static void UnregisterCTryCatch() {
+ Simulator::current(Isolate::Current())->PopAddress();
+ }
+};
+
+#endif // !defined(USE_SIMULATOR)
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/deps/v8/src/arm64/stub-cache-arm64.cc b/deps/v8/src/arm64/stub-cache-arm64.cc
new file mode 100644
index 000000000..1b2e95993
--- /dev/null
+++ b/deps/v8/src/arm64/stub-cache-arm64.cc
@@ -0,0 +1,1496 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "ic-inl.h"
+#include "codegen.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(!AreAliased(receiver, scratch0, scratch1));
+ ASSERT(name->IsUniqueName());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+ __ B(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ __ Bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+// Probe the primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss the code falls through.
+//
+// The 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register receiver,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2,
+ Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, key_offset);
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra,
+ Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+  // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+ // Make sure extra and extra2 registers are valid.
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kHeapObjectTagSize,
+ CountTrailingZeros(kPrimaryTableSize, 64));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
+ scratch, extra, extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ Ldr(prototype, GlobalObjectMemOperand());
+ // Load the native context from the global or builtins object.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ __ Ldr(prototype, ContextMemOperand(prototype, index));
+ // Load the initial map. The global functions all have initial maps.
+ __ Ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ __ Ldr(scratch, GlobalObjectMemOperand());
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Ldr(scratch, ContextMemOperand(scratch, index));
+ __ Cmp(scratch, Operand(function));
+ __ B(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!representation.IsDouble());
+ USE(representation);
+ if (inobject) {
+ int offset = index * kPointerSize;
+ __ Ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ Ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss_label);
+
+ // Check that the object is a JS array.
+ __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
+ miss_label);
+
+ // Load length directly from the JS array.
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ // TryGetFunctionPrototype can't put the result directly in x0 because the
+  // 3 input registers can't alias and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
+  // move the result into x0.
+ __ Mov(x0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Mov(scratch, Operand(cell));
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+void StoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// Generate StoreTransition code, value is passed in x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
+ Label exit;
+
+ ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
+ scratch1, scratch2, scratch3));
+
+ // We don't need scratch3.
+ scratch3 = NoReg;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ LoadObject(scratch1, constant);
+ __ Cmp(value_reg, scratch1);
+ __ B(ne, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
+
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(temp_double, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((details.type() == FIELD) &&
+ (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Mov(scratch1, Operand(transition));
+ __ Push(receiver_reg, scratch1, value_reg);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3,
+ 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Mov(scratch1, Operand(transition));
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ scratch2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg,
+ offset,
+ storage_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(prop_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1,
+ offset,
+ storage_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+// Generate StoreField code, value is passed in x0 register.
+// When leaving generated code after success, the receiver_reg and name_reg may
+// be clobbered. Upon branch to miss_label, the receiver and name registers have
+// their original values.
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // x0 : value
+ Label exit;
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm);
+ DoubleRegister temp_double = temps.AcquireD();
+
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ // Load the double storage.
+ if (index < 0) {
+ int offset = (index * kPointerSize) + object->map()->instance_size();
+ __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check = representation.IsTagged()
+ ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(value_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ Mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ ASSERT(value_reg.is(x0));
+ __ Ret();
+}
+
+
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values) {
+ ASSERT(!AreAliased(receiver, scratch));
+
+ MacroAssembler::PushPopQueue queue(masm);
+ queue.Queue(receiver);
+ // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!AreAliased(receiver, scratch, arg));
+ queue.Queue(arg);
+ }
+ queue.PushQueued();
+
+ ASSERT(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ __ Mov(api_function_address, ref);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ Handle<Name> name,
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
+
+ // object_reg and holder_reg registers can alias.
+ ASSERT(!AreAliased(object_reg, scratch1, scratch2));
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) {
+ current = Handle<JSObject>::cast(type->AsConstant());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(current.is_null() ||
+ (current->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound));
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
+ scratch1, scratch2);
+
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
+ heap()->InNewSpace(*prototype);
+ Register map_reg = NoReg;
+ if (need_map) {
+ map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ if (current_map->IsJSGlobalProxyMap()) {
+ UseScratchRegisterScope temps(masm());
+ __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (heap()->InNewSpace(*prototype)) {
+ // The prototype is in new space; we cannot store a reference to it
+ // in the code. Load it from the map.
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ __ Mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
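+
+// Summary (illustrative, not part of the original comments): the loop above
+// walks the prototype chain from the receiver's map to the holder's map. For
+// dictionary-mode, non-global objects it emits a negative name lookup; for
+// everything else it checks the map (plus the global-proxy access check or
+// the global property cell where applicable), then moves on to the prototype.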
+
+
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ __ Bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> callback) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+ // HandlerFrontendHeader can return its result into scratch1() so do not
+ // use it.
+ Register scratch2 = this->scratch2();
+ Register scratch3 = this->scratch3();
+ Register dictionary = this->scratch4();
+ ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
+
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ // Load the properties dictionary.
+ __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2,
+ scratch3);
+ __ Bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3;
+ const int kElementsStartOffset = NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
+ __ Cmp(scratch2, Operand(callback));
+ __ B(ne, &miss);
+ }
+
+ HandlerFrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
+ __ Mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
+}
+
+
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(x0, value);
+ __ Ret();
+}
+
+
+void LoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+  // property name below the exit frame, so that the GC is aware of them and
+  // pointers to them can be stored.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+ __ Push(receiver());
+
+ if (heap()->InNewSpace(callback->data())) {
+ __ Mov(scratch3(), Operand(callback));
+ __ Ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ }
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
+
+ Register args_addr = scratch2();
+ __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+ // Stack at this point:
+ // sp[40] callback data
+ // sp[32] undefined
+ // sp[24] undefined
+ // sp[16] isolate
+ // args_addr -> sp[8] reg
+ // sp[0] name
+
+ // Abi for CallApiGetter.
+ Register getter_address_reg = x2;
+
+ // Set up the call.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ Mov(getter_address_reg, ref);
+
+ CallApiGetterStub stub;
+ __ TailCallStub(&stub);
+}
+
+
+void LoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<Object> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ ASSERT(!AreAliased(receiver(), this->name(),
+ scratch1(), scratch2(), scratch3()));
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // So far the most popular follow-ups for interceptor loads are FIELD and
+  // CALLBACKS, so inline only those; other cases may be added later.
+ bool compile_followup_inline = false;
+ if (lookup->IsFound() && lookup->IsCacheable()) {
+ if (lookup->IsField()) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
+ compile_followup_inline = callback->getter() != NULL &&
+ callback->IsCompatibleReceiver(*object);
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+      // Invoke an interceptor. Note: map checks from the receiver to the
+      // interceptor's holder have been compiled before (see the caller of
+      // this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
+      // Check if the interceptor provided a value for the property. If so,
+      // return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0,
+ Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
+ }
+ // Leave the internal frame.
+ }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ PushInterceptorArguments(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
+ isolate());
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
+ }
+}
+
+
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ UseScratchRegisterScope temps(masm());
+ // Check that the object is a boolean.
+ Register true_root = temps.AcquireX();
+ Register false_root = temps.AcquireX();
+ ASSERT(!AreAliased(object, true_root, false_root));
+ __ LoadTrueFalseRoots(true_root, false_root);
+ __ Cmp(object, true_root);
+ __ Ccmp(object, false_root, ZFlag, ne);
+ __ B(ne, miss);
+}
+
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
+ Register holder_reg = HandlerFrontend(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
+
+ // Stub never generated for non-global objects that require access checks.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+
+ // receiver() and holder_reg can alias.
+ ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+ __ Mov(scratch1(), Operand(callback));
+ __ Mov(scratch2(), Operand(name));
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void StoreStubCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(x0);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
+
+ // Return undefined if maps of the full prototype chain are still the
+ // same and no global property with this name contains a value.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
+// KeyedStoreStubCompiler::transition_map(). We should verify which registers
+// are actually scratch registers, and which are important. For now, we use the
+// same assignments as ARM to remain on the safe side.
+
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x0, x2, x3, x1, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { x1, x0, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register StoreStubCompiler::value() {
+ return x0;
+}
+
+
+Register* StoreStubCompiler::registers() {
+ // receiver, value, scratch1, scratch2, scratch3.
+ static Register registers[] = { x1, x2, x3, x4, x5 };
+ return registers;
+}
+
+
+Register* KeyedStoreStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { x2, x1, x3, x4, x5 };
+ return registers;
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(
+ receiver, JSGlobalObject::kGlobalReceiverOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual,
+ CALL_FUNCTION, NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> LoadStubCompiler::CompileLoadGlobal(
+ Handle<HeapType> type,
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete) {
+ Label miss;
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
+
+ // Get the value from the cell.
+ __ Mov(x3, Operand(cell));
+ __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Mov(x0, x4);
+ __ Ret();
+
+ HandlerFrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ Register map_reg = scratch1();
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ ASSERT(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetICCode(kind(), type, name, state);
+}
+
+
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
+ MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetICCode(
+ kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- x0 : key
+ // -- x1 : receiver
+ // -----------------------------------
+ Label slow, miss;
+
+ Register result = x0;
+ Register key = x0;
+ Register receiver = x1;
+
+ __ JumpIfNotSmi(key, &miss);
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
+ __ Ret();
+
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ Bind(&miss);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc
new file mode 100644
index 000000000..e2589f42e
--- /dev/null
+++ b/deps/v8/src/arm64/utils-arm64.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "arm64/utils-arm64.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ assm->
+
+
+int CountLeadingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+  uint64_t bit_test = 1ULL << (width - 1);
+ while ((count < width) && ((bit_test & value) == 0)) {
+ count++;
+ bit_test >>= 1;
+ }
+ return count;
+}
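+
+// A possible host-side shortcut (sketch only, assuming a GCC/Clang host where
+// __builtin_clzll is available and value fits in 'width' bits; the portable
+// loop above remains the reference implementation):
+//
+//   int CountLeadingZerosFast(uint64_t value, int width) {
+//     if (value == 0) return width;
+//     return __builtin_clzll(value) - (64 - width);
+//   }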
+
+
+int CountLeadingSignBits(int64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+}
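+
+// Illustrative values: CountLeadingSignBits(1, 64) == 62 and
+// CountLeadingSignBits(-1, 64) == 63.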
+
+
+int CountTrailingZeros(uint64_t value, int width) {
+ // TODO(jbramley): Optimize this for ARM64 hosts.
+ ASSERT((width == 32) || (width == 64));
+ int count = 0;
+ while ((count < width) && (((value >> count) & 1) == 0)) {
+ count++;
+ }
+ return count;
+}
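+
+// Illustrative values: CountTrailingZeros(0x8, 64) == 3 and
+// CountTrailingZeros(0, 64) == 64.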
+
+
+int CountSetBits(uint64_t value, int width) {
+ // TODO(jbramley): Would it be useful to allow other widths? The
+ // implementation already supports them.
+ ASSERT((width == 32) || (width == 64));
+
+ // Mask out unused bits to ensure that they are not counted.
+ value &= (0xffffffffffffffffUL >> (64-width));
+
+ // Add up the set bits.
+ // The algorithm works by adding pairs of bit fields together iteratively,
+ // where the size of each bit field doubles each time.
+ // An example for an 8-bit value:
+ // Bits: h g f e d c b a
+ // \ | \ | \ | \ |
+ // value = h+g f+e d+c b+a
+ // \ | \ |
+ // value = h+g+f+e d+c+b+a
+ // \ |
+ // value = h+g+f+e+d+c+b+a
+ value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
+ value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
+
+ return value;
+}
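+
+// Worked example (illustrative): CountSetBits(0xf0f0, 64) == 8, while
+// CountSetBits(0xffffffff00000000, 32) == 0 because the unused upper bits are
+// masked out before counting.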
+
+
+int MaskToBit(uint64_t mask) {
+ ASSERT(CountSetBits(mask, 64) == 1);
+ return CountTrailingZeros(mask, 64);
+}
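+
+// Illustrative usage: MaskToBit(0x4) == 2, i.e. the index of the single set
+// bit in a one-bit mask.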
+
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
new file mode 100644
index 000000000..a1fa12cfa
--- /dev/null
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -0,0 +1,135 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_UTILS_ARM64_H_
+#define V8_ARM64_UTILS_ARM64_H_
+
+#include <cmath>
+#include "v8.h"
+#include "arm64/constants-arm64.h"
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+namespace v8 {
+namespace internal {
+
+// These are global assumptions in v8.
+STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+// Floating point representation.
+static inline uint32_t float_to_rawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+static inline uint64_t double_to_rawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+static inline float rawbits_to_float(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+static inline double rawbits_to_double(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
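+
+// Illustrative round-trips (IEEE-754 encodings, not part of the original
+// file):
+//   double_to_rawbits(1.0) == 0x3ff0000000000000
+//   rawbits_to_double(0x3ff0000000000000) == 1.0
+//   float_to_rawbits(-0.0f) == 0x80000000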
+
+
+// Bit counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+int MaskToBit(uint64_t mask);
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ uint64_t raw = double_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ uint32_t raw = float_to_rawbits(num);
+ if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return std::isnan(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+  ASSERT(std::isnan(num));
+ return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+  ASSERT(std::isnan(num));
+ return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
+}
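+
+// Illustrative example: for doubles the quiet bit is the top mantissa bit, so
+// assuming kDQuietNanMask selects that bit, ToQuietNaN turns the signalling
+// NaN 0x7ff0000000000001 into the quiet NaN 0x7ff8000000000001.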
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM64_UTILS_ARM64_H_
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index a8c5e001c..3af659dbc 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
-var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
-var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
+var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
+var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
+var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind");
function ArrayIterator() {}
@@ -46,7 +46,7 @@ function ArrayIterator() {}
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
- SET_PRIVATE(iterator, iteratorObjectSymbol, object);
+ SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
@@ -60,7 +60,7 @@ function CreateIteratorResultObject(value, done) {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
- var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
+ var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 372b7ece6..e48230e2b 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1115,8 +1115,8 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
- var num_non_undefined = %IsObserved(this) ?
- -1 : %RemoveArrayHoles(this, length);
+ // %RemoveArrayHoles returns -1 if fast removal is not supported.
+ var num_non_undefined = %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// The array is observed, or there were indexed accessors in the array.
@@ -1153,7 +1153,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1201,7 +1201,7 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1242,7 +1242,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1282,7 +1282,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1321,7 +1321,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(f)) {
receiver = ToObject(receiver);
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 436d035c3..772b6d696 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -59,6 +59,8 @@
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
@@ -73,6 +75,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -122,7 +126,6 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
-
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -283,9 +286,12 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
-// 1101: constant pool. Used on ARM only for now.
-// The format is: 11 1101 11
-// signed int (size of the constant pool).
+// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
+// The format is: [2-bit sub-type] 1101 11
+// signed int (size of the pool).
+// The 2-bit sub-types are:
+// 00: constant pool
+// 01: veneer pool
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
@@ -342,8 +348,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
-const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
-const int kConstPoolTag = 3;
+const int kPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 0;
+const int kVeneerPoolTag = 1;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -403,8 +410,8 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
-void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
- WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
+ WriteExtraTag(kPoolExtraTag, pool_type);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
@@ -476,9 +483,11 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else if (RelocInfo::IsConstPool(rmode)) {
+ } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
+ WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
+ RelocInfo::IsConstPool(rmode) ? kConstPoolTag
+ : kVeneerPoolTag);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -529,7 +538,7 @@ void RelocIterator::AdvanceReadId() {
}
-void RelocIterator::AdvanceReadConstPoolData() {
+void RelocIterator::AdvanceReadPoolData() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@@ -671,10 +680,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
- } else if ((extra_tag == kConstPoolExtraTag) &&
- (GetTopTag() == kConstPoolTag)) {
- if (SetMode(RelocInfo::CONST_POOL)) {
- AdvanceReadConstPoolData();
+ } else if (extra_tag == kPoolExtraTag) {
+ int pool_type = GetTopTag();
+ ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
+ RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
+ RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
+ if (SetMode(rmode)) {
+ AdvanceReadPoolData();
return;
}
Advance(kIntSize);
@@ -793,6 +805,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case RelocInfo::CONST_POOL:
return "constant pool";
+ case RelocInfo::VENEER_POOL:
+ return "veneer pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@@ -880,6 +894,7 @@ void RelocInfo::Verify() {
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case CONST_POOL:
+ case VENEER_POOL:
case DEBUG_BREAK_SLOT:
case NONE32:
case NONE64:
@@ -1026,14 +1041,6 @@ ExternalReference ExternalReference::
ExternalReference ExternalReference::
- incremental_evacuation_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
@@ -1052,6 +1059,12 @@ ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
}
+ExternalReference ExternalReference::out_of_memory_function(Isolate* isolate) {
+ return
+ ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::OutOfMemory)));
+}
+
+
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1336,6 +1349,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
@@ -1596,4 +1611,38 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written;
}
+
+MultiplierAndShift::MultiplierAndShift(int32_t d) {
+ ASSERT(d <= -2 || 2 <= d);
+ const uint32_t two31 = 0x80000000;
+ uint32_t ad = Abs(d);
+ uint32_t t = two31 + (uint32_t(d) >> 31);
+ uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
+ int32_t p = 31; // Init. p.
+ uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
+ uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
+ uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
+ uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
+ uint32_t delta;
+ do {
+ p++;
+ q1 *= 2; // Update q1 = 2**p/|nc|.
+ r1 *= 2; // Update r1 = rem(2**p, |nc|).
+ if (r1 >= anc) { // Must be an unsigned comparison here.
+ q1++;
+ r1 = r1 - anc;
+ }
+ q2 *= 2; // Update q2 = 2**p/|d|.
+ r2 *= 2; // Update r2 = rem(2**p, |d|).
+ if (r2 >= ad) { // Must be an unsigned comparison here.
+ q2++;
+ r2 = r2 - ad;
+ }
+ delta = ad - r2;
+ } while (q1 < delta || (q1 == delta && r1 == 0));
+ int32_t mul = static_cast<int32_t>(q2 + 1);
+ multiplier_ = (d < 0) ? -mul : mul;
+ shift_ = p - 32;
+}
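+
+// Worked example (illustrative): for d == 5 the loop above yields
+// multiplier_ == 0x66666667 and shift_ == 1, so n / 5 can be computed as
+// ((static_cast<int64_t>(n) * 0x66666667) >> 32) >> 1, followed by the usual
+// correction of adding the sign bit of the result.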
+
} } // namespace v8::internal
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index ce7d9f5b7..0349b0658 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -82,6 +82,10 @@ class AssemblerBase: public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+ // This function is called when code generation is aborted, so that
+ // the assembler could clean up internal data structures.
+ virtual void AbortedCodeGeneration() { }
+
static const int kMinimalBufferSize = 4*KB;
protected:
@@ -210,6 +214,12 @@ class Label BASE_EMBEDDED {
friend class Assembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
+
+#if V8_TARGET_ARCH_ARM64
+ // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+ // branches to distant targets. Copying labels would confuse the Assembler.
+ DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
+#endif
};
@@ -276,9 +286,10 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
- // Marks a constant pool. Only used on ARM.
- // It uses a custom noncompact encoding.
+ // Marks constant and veneer pools. Only used on ARM and ARM64.
+ // They use a custom noncompact encoding.
CONST_POOL,
+ VENEER_POOL,
// add more as needed
// Pseudo-types
@@ -288,7 +299,7 @@ class RelocInfo BASE_EMBEDDED {
    CODE_AGE_SEQUENCE,          // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = CONST_POOL,
+ LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
@@ -342,6 +353,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsConstPool(Mode mode) {
return mode == CONST_POOL;
}
+ static inline bool IsVeneerPool(Mode mode) {
+ return mode == VENEER_POOL;
+ }
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@@ -365,6 +379,15 @@ class RelocInfo BASE_EMBEDDED {
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
+ // Returns true if the first RelocInfo has the same mode and raw data as the
+ // second one.
+ static inline bool IsEqual(RelocInfo first, RelocInfo second) {
+ return first.rmode() == second.rmode() &&
+ (first.rmode() == RelocInfo::NONE64 ?
+ first.raw_data64() == second.raw_data64() :
+ first.data() == second.data());
+ }
+
// Accessors
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
@@ -375,6 +398,7 @@ class RelocInfo BASE_EMBEDDED {
return BitCast<uint64_t>(data64_);
}
Code* host() const { return host_; }
+ void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@@ -384,6 +408,10 @@ class RelocInfo BASE_EMBEDDED {
// instructions).
bool IsCodedSpecially();
+ // If true, the pointer this relocation info refers to is an entry in the
+ // constant pool, otherwise the pointer is embedded in the instruction stream.
+ bool IsInConstantPool();
+
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -406,6 +434,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
+ // Returns the address of the constant pool entry where the target address
+ // is held. This should only be called if IsInConstantPool returns true.
+ INLINE(Address constant_pool_entry_address());
+
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
// The only architecture-independent user of this function is the serializer.
@@ -413,6 +445,7 @@ class RelocInfo BASE_EMBEDDED {
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
+
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
// an instruction like movw/movt where the target bits are mixed into the
@@ -537,7 +570,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
- inline void WriteExtraTaggedConstPoolData(int data);
+ inline void WriteExtraTaggedPoolData(int data, int pool_type);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -588,7 +621,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
- void AdvanceReadConstPoolData();
+ void AdvanceReadPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
@@ -711,12 +744,11 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
- static ExternalReference incremental_evacuation_record_write_function(
- Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
+ static ExternalReference out_of_memory_function(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@@ -1002,32 +1034,6 @@ class PreservePositionScope BASE_EMBEDDED {
// -----------------------------------------------------------------------------
// Utility functions
-inline bool is_intn(int x, int n) {
- return -(1 << (n-1)) <= x && x < (1 << (n-1));
-}
-
-inline bool is_int8(int x) { return is_intn(x, 8); }
-inline bool is_int16(int x) { return is_intn(x, 16); }
-inline bool is_int18(int x) { return is_intn(x, 18); }
-inline bool is_int24(int x) { return is_intn(x, 24); }
-
-inline bool is_uintn(int x, int n) {
- return (x & -(1 << n)) == 0;
-}
-
-inline bool is_uint2(int x) { return is_uintn(x, 2); }
-inline bool is_uint3(int x) { return is_uintn(x, 3); }
-inline bool is_uint4(int x) { return is_uintn(x, 4); }
-inline bool is_uint5(int x) { return is_uintn(x, 5); }
-inline bool is_uint6(int x) { return is_uintn(x, 6); }
-inline bool is_uint8(int x) { return is_uintn(x, 8); }
-inline bool is_uint10(int x) { return is_uintn(x, 10); }
-inline bool is_uint12(int x) { return is_uintn(x, 12); }
-inline bool is_uint16(int x) { return is_uintn(x, 16); }
-inline bool is_uint24(int x) { return is_uintn(x, 24); }
-inline bool is_uint26(int x) { return is_uintn(x, 26); }
-inline bool is_uint28(int x) { return is_uintn(x, 28); }
-
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
@@ -1065,6 +1071,21 @@ class NullCallWrapper : public CallWrapper {
virtual void AfterCall() const { }
};
+
+// The multiplier and shift for signed division via multiplication, see Warren's
+// "Hacker's Delight", chapter 10.
+class MultiplierAndShift {
+ public:
+ explicit MultiplierAndShift(int32_t d);
+ int32_t multiplier() const { return multiplier_; }
+ int32_t shift() const { return shift_; }
+
+ private:
+ int32_t multiplier_;
+ int32_t shift_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
new file mode 100644
index 000000000..960567cfa
--- /dev/null
+++ b/deps/v8/src/assert-scope.cc
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "assert-scope.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) {
+ return isolate->per_isolate_assert_data();
+}
+
+
+void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) {
+ isolate->set_per_isolate_assert_data(data);
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 269b280d0..428e6d007 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -30,6 +30,7 @@
#include "allocation.h"
#include "platform.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -46,7 +47,13 @@ enum PerThreadAssertType {
};
-#ifdef DEBUG
+enum PerIsolateAssertType {
+ JAVASCRIPT_EXECUTION_ASSERT,
+ JAVASCRIPT_EXECUTION_THROWS,
+ ALLOCATION_FAILURE_ASSERT
+};
+
+
class PerThreadAssertData {
public:
PerThreadAssertData() : nesting_level_(0) {
@@ -72,12 +79,9 @@ class PerThreadAssertData {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
-#endif // DEBUG
class PerThreadAssertScopeBase {
-#ifdef DEBUG
-
protected:
PerThreadAssertScopeBase() {
data_ = GetAssertData();
@@ -110,18 +114,12 @@ class PerThreadAssertScopeBase {
static void SetThreadLocalData(PerThreadAssertData* data) {
Thread::SetThreadLocal(thread_local_key, data);
}
-#endif // DEBUG
};
-
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
-#ifndef DEBUG
- PerThreadAssertScope() { }
- static void SetIsAllowed(bool is_allowed) { }
-#else
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
@@ -136,49 +134,140 @@ class PerThreadAssertScope : public PerThreadAssertScopeBase {
private:
bool old_state_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
+};
+
+
+class PerIsolateAssertBase {
+ protected:
+ static uint32_t GetData(Isolate* isolate);
+ static void SetData(Isolate* isolate, uint32_t data);
+};
+
+
+template <PerIsolateAssertType type, bool allow>
+class PerIsolateAssertScope : public PerIsolateAssertBase {
+ public:
+ explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) {
+ STATIC_ASSERT(type < 32);
+ old_data_ = GetData(isolate_);
+ SetData(isolate_, DataBit::update(old_data_, allow));
+ }
+
+ ~PerIsolateAssertScope() {
+ SetData(isolate_, old_data_);
+ }
+
+ static bool IsAllowed(Isolate* isolate) {
+ return DataBit::decode(GetData(isolate));
+ }
+
+ private:
+ typedef BitField<bool, type, 1> DataBit;
+
+ uint32_t old_data_;
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
+};
+
+
+template <PerThreadAssertType type, bool allow>
+#ifdef DEBUG
+class PerThreadAssertScopeDebugOnly : public
+ PerThreadAssertScope<type, allow> {
+#else
+class PerThreadAssertScopeDebugOnly {
+ public:
+ PerThreadAssertScopeDebugOnly() { }
#endif
};
+
+template <PerIsolateAssertType type, bool allow>
+#ifdef DEBUG
+class PerIsolateAssertScopeDebugOnly : public
+ PerIsolateAssertScope<type, allow> {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate)
+ : PerIsolateAssertScope<type, allow>(isolate) { }
+#else
+class PerIsolateAssertScopeDebugOnly {
+ public:
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) { }
+#endif
+};
+
+// Per-thread assert scopes.
+
// Scope to document where we do not expect handles to be created.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>
DisallowHandleAllocation;
// Scope to introduce an exception to DisallowHandleAllocation.
-typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
AllowHandleAllocation;
// Scope to document where we do not expect any allocation and GC.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
// Scope to introduce an exception to DisallowHeapAllocation.
-typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
AllowHeapAllocation;
// Scope to document where we do not expect any handle dereferences.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>
DisallowHandleDereference;
// Scope to introduce an exception to DisallowHandleDereference.
-typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>
AllowHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
DisallowDeferredHandleDereference;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
DisallowCodeDependencyChange;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
+typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
+
+// Per-isolate assert scopes.
+
+// Scope to document where we do not expect javascript execution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
+ DisallowJavascriptExecution;
+
+// Scope to introduce an exception to DisallowJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
+ AllowJavascriptExecution;
+
+// Scope in which javascript execution leads to exception being thrown.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
+ ThrowOnJavascriptExecution;
+
+// Scope to introduce an exception to ThrowOnJavascriptExecution.
+typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
+ NoThrowOnJavascriptExecution;
+
+// Scope to document where we do not expect an allocation failure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, false>
+ DisallowAllocationFailure;
+
+// Scope to introduce an exception to DisallowAllocationFailure.
+typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
+ AllowAllocationFailure;
+
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
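Editor's note: the per-isolate scopes above pack one bit per PerIsolateAssertType into a 32-bit word stored on the isolate (hence the STATIC_ASSERT(type < 32)); entering a scope overwrites its bit via BitField::update and the destructor restores the previous word. A minimal usage sketch, assuming only the scope types and IsAllowed() declared above; the surrounding function is hypothetical:

// Hypothetical caller that must not re-enter JavaScript while it runs.
void MutateVmInternals(v8::internal::Isolate* isolate) {
  v8::internal::DisallowJavascriptExecution no_js(isolate);
  // Inside this scope the JAVASCRIPT_EXECUTION_ASSERT bit is cleared, so a
  // check site guarding entry into JS with something like
  //   ASSERT(AllowJavascriptExecution::IsAllowed(isolate));
  // would now fail.
  // ... mutate internal state ...
}  // Destructor restores the isolate's previous assert word.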
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 1a9919b5a..f6cf18915 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -180,8 +180,8 @@ int FunctionLiteral::end_position() const {
}
-LanguageMode FunctionLiteral::language_mode() const {
- return scope()->language_mode();
+StrictMode FunctionLiteral::strict_mode() const {
+ return scope()->strict_mode();
}
@@ -357,8 +357,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array =
isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate->factory()->SetElementsCapacityAndLength(
- array, values()->length(), values()->length());
+ JSArray::Expand(array, values()->length());
// Fill in the literals.
bool is_simple = true;
@@ -379,9 +378,9 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
} else if (boilerplate_value->IsUninitialized()) {
is_simple = false;
JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
+ array, i, handle(Smi::FromInt(0), isolate), SLOPPY);
} else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ JSObject::SetOwnElement(array, i, boilerplate_value, SLOPPY);
}
}
@@ -593,6 +592,17 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
+int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
+ CallType call_type = GetCallType(isolate);
+ if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
+ // Call only uses a slot in some cases.
+ return 1;
+ }
+
+ return 0;
+}
+
+
Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
@@ -632,11 +642,14 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ int allocation_site_feedback_slot = FLAG_pretenuring_call_new
+ ? AllocationSiteFeedbackSlot()
+ : CallNewFeedbackSlot();
allocation_site_ =
- oracle->GetCallNewAllocationSite(CallNewFeedbackId());
- is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
+ oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind();
}
@@ -1039,6 +1052,11 @@ CaseClause::CaseClause(Zone* zone,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ }
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1051,6 +1069,12 @@ CaseClause::CaseClause(Zone* zone,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+ increase_node_count(); \
+ add_slot_node(node); \
+ add_flag(kDontSelfOptimize); \
+ }
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
@@ -1085,8 +1109,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
-REGULAR_NODE(Call)
-REGULAR_NODE(CallNew)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
@@ -1111,11 +1135,12 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
-DONT_SELFOPTIMIZE_NODE(ForInStatement)
+DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
+
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 2b33820f9..c6ee71ed8 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -32,6 +32,7 @@
#include "assembler.h"
#include "factory.h"
+#include "feedback-slots.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
@@ -181,7 +182,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) { }
+ AstProperties() : node_count_(0) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@@ -914,7 +915,8 @@ class ForEachStatement : public IterationStatement {
};
-class ForInStatement V8_FINAL : public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement,
+ public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -922,7 +924,16 @@ class ForInStatement V8_FINAL : public ForEachStatement {
return subject();
}
- TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
+ // Type feedback information.
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
+ virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
+
+ int ForInFeedbackSlot() {
+ ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
+ return for_in_feedback_slot_;
+ }
+
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; }
@@ -936,11 +947,13 @@ class ForInStatement V8_FINAL : public ForEachStatement {
ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
: ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN),
+ for_in_feedback_slot_(kInvalidFeedbackSlot),
body_id_(GetNextId(zone)),
prepare_id_(GetNextId(zone)) {
}
ForInType for_in_type_;
+ int for_in_feedback_slot_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
@@ -1733,7 +1746,7 @@ class Property V8_FINAL : public Expression {
};
-class Call V8_FINAL : public Expression {
+class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(Call)
@@ -1741,7 +1754,16 @@ class Call V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
+ virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate);
+ virtual void SetFirstFeedbackSlot(int slot) {
+ call_feedback_slot_ = slot;
+ }
+
+ bool HasCallFeedbackSlot() const {
+ return call_feedback_slot_ != kInvalidFeedbackSlot;
+ }
+ int CallFeedbackSlot() const { return call_feedback_slot_; }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
if (expression()->IsProperty()) {
@@ -1790,6 +1812,7 @@ class Call V8_FINAL : public Expression {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
+ call_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
@@ -1802,12 +1825,13 @@ class Call V8_FINAL : public Expression {
Handle<JSFunction> target_;
Handle<Cell> cell_;
+ int call_feedback_slot_;
const BailoutId return_id_;
};
-class CallNew V8_FINAL : public Expression {
+class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
public:
DECLARE_NODE_TYPE(CallNew)
@@ -1815,7 +1839,24 @@ class CallNew V8_FINAL : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
+ virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) {
+ return FLAG_pretenuring_call_new ? 2 : 1;
+ }
+ virtual void SetFirstFeedbackSlot(int slot) {
+ callnew_feedback_slot_ = slot;
+ }
+
+ int CallNewFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ return callnew_feedback_slot_;
+ }
+ int AllocationSiteFeedbackSlot() {
+ ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+ ASSERT(FLAG_pretenuring_call_new);
+ return callnew_feedback_slot_ + 1;
+ }
+
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
@@ -1824,6 +1865,8 @@ class CallNew V8_FINAL : public Expression {
return allocation_site_;
}
+ static int feedback_slots() { return 1; }
+
BailoutId ReturnId() const { return return_id_; }
protected:
@@ -1836,6 +1879,7 @@ class CallNew V8_FINAL : public Expression {
arguments_(arguments),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
+ callnew_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { }
private:
@@ -1846,6 +1890,7 @@ class CallNew V8_FINAL : public Expression {
Handle<JSFunction> target_;
ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_;
+ int callnew_feedback_slot_;
const BailoutId return_id_;
};
@@ -2276,8 +2321,7 @@ class FunctionLiteral V8_FINAL : public Expression {
int SourceSize() const { return end_position() - start_position(); }
bool is_expression() const { return IsExpression::decode(bitfield_); }
bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- LanguageMode language_mode() const;
+ StrictMode strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -2332,7 +2376,15 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
-
+ void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
+ slot_processor_ = *slot_processor;
+ }
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ slot_processor_.ProcessFeedbackSlots(isolate);
+ }
+ int slot_count() {
+ return slot_processor_.slot_count();
+ }
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) {
@@ -2382,6 +2434,7 @@ class FunctionLiteral V8_FINAL : public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
@@ -2856,10 +2909,13 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+ explicit AstConstructionVisitor(Zone* zone)
+ : dont_optimize_reason_(kNoReason),
+ zone_(zone) { }
AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private:
template<class> friend class AstNodeFactory;
@@ -2876,13 +2932,21 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason;
}
+ void add_slot_node(FeedbackSlotInterface* slot_node) {
+ slot_processor_.add_slot_node(zone_, slot_node);
+ }
+
AstProperties properties_;
+ DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_;
+ Zone* zone_;
};
class AstNullVisitor BASE_EMBEDDED {
public:
+ explicit AstNullVisitor(Zone* zone) {}
+
// Node visitors.
#define DEF_VISIT(type) \
void Visit##type(type* node) {}
@@ -2898,7 +2962,9 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
- explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
+ explicit AstNodeFactory(Zone* zone)
+ : zone_(zone),
+ visitor_(zone) { }
Visitor* visitor() { return &visitor_; }
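Editor's note: together with the ForInStatement/Call/CallNew changes above, FeedbackSlotInterface lets AST nodes reserve contiguous indices in a per-function type-feedback vector: ForInStatement and CallNew know their slot count at parse time, Call only after scoping, and CallNew takes a second slot for the allocation site when --pretenuring-call-new is enabled. The real numbering pass lives in DeferredFeedbackSlotProcessor (declared in feedback-slots.h, which is not shown in this diff); the sketch below is an assumed shape of that pass, not the literal V8 code.

// Assumed shape of slot numbering over collected nodes (illustrative).
int AssignFeedbackSlots(
    v8::internal::ZoneList<v8::internal::FeedbackSlotInterface*>* nodes,
    v8::internal::Isolate* isolate) {
  int next_slot = 0;
  for (int i = 0; i < nodes->length(); i++) {
    v8::internal::FeedbackSlotInterface* node = nodes->at(i);
    int count = node->ComputeFeedbackSlotCount(isolate);
    if (count > 0) {
      node->SetFirstFeedbackSlot(next_slot);  // CallNew also owns slot + 1.
      next_slot += count;
    }
  }
  return next_slot;  // Total feedback vector length for the function.
}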
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index 789721edf..08be2a7d3 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -51,6 +51,15 @@
#include "../include/v8.h"
#include "globals.h"
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier at the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
namespace v8 {
namespace internal {
@@ -58,9 +67,7 @@ typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__ILP32__) || defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
+#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
@@ -69,11 +76,7 @@ typedef intptr_t Atomic64;
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
typedef intptr_t AtomicWord;
-#endif
// Atomically execute:
// result = *ptr;
@@ -155,16 +158,24 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_gcc.h"
+#elif defined(__APPLE__)
+#include "atomicops_internals_mac.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "atomicops_internals_atomicword_compat.h"
+#endif
+
#endif // V8_ATOMICOPS_H_
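Editor's note: the dispatch above only selects a backend; every backend exports the same free functions, so architecture-independent code includes atomicops.h alone. A small self-contained sketch of typical usage; the flag and counter are illustrative, not taken from V8.

#include "atomicops.h"

namespace {

v8::internal::Atomic32 g_initialized = 0;  // Illustrative one-shot flag.

// Returns true for exactly one caller; acquire semantics order the winner's
// initialization before later readers of the flag.
bool TryClaimInitialization() {
  return v8::internal::Acquire_CompareAndSwap(&g_initialized, 0, 1) == 0;
}

// Full barriers on both sides of the increment, as Barrier_AtomicIncrement
// guarantees on every backend.
v8::internal::Atomic32 BumpCounter(volatile v8::internal::Atomic32* counter) {
  return v8::internal::Barrier_AtomicIncrement(counter, 1);
}

}  // namespace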
diff --git a/deps/v8/src/atomicops_internals_arm64_gcc.h b/deps/v8/src/atomicops_internals_arm64_gcc.h
new file mode 100644
index 000000000..e6cac1993
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,372 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory"
+ ); // NOLINT
+}
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ "clrex \n\t" // In case we didn't swap.
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "dmb ish \n\t" // Data memory barrier.
+ "1: \n\t"
+    // If the compare failed, the 'dmb' is unnecessary, but we still need a
+ // 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+    // If the compare failed we still need a 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ MemoryBarrier();
+ Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "dmb ish \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
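Editor's note: the loops above follow the usual ARM64 load-exclusive/store-exclusive pattern -- stxr writes 0 or 1 into the temp register, cbnz retries on failure, and clrex drops the exclusive monitor on the path that did not store. A hedged sketch of a lock built purely on the interface this file implements; the SpinLock class is illustrative, not part of the patch.

// Illustrative spinlock over the atomicops primitives defined above.
class SpinLock {
 public:
  SpinLock() : state_(0) {}
  void Lock() {
    // Acquire semantics: the dmb issued after a successful stxr orders the
    // critical section after lock acquisition.
    while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin; a production lock would yield or back off here.
    }
  }
  void Unlock() {
    // Release semantics: barrier, then plain store (see Release_Store above).
    v8::internal::Release_Store(&state_, 0);
  }
 private:
  volatile v8::internal::Atomic32 state_;
};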
diff --git a/deps/v8/src/atomicops_internals_arm_gcc.h b/deps/v8/src/atomicops_internals_arm_gcc.h
index 6c30256d9..918920d02 100644
--- a/deps/v8/src/atomicops_internals_arm_gcc.h
+++ b/deps/v8/src/atomicops_internals_arm_gcc.h
@@ -32,46 +32,197 @@
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
namespace v8 {
namespace internal {
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
- (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction at
+// all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+// writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+// barrier (though writing to the co-processor will still work).
+// However, on single core devices (e.g. Nexus One, or Nexus S),
+// this instruction will take up to 200 ns, which is huge, even though
+// it's completely unneeded on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+// single or multi-core. However, the kernel provides a useful helper
+// function at a fixed memory address (0xffff0fa0), which will always
+// perform a memory barrier in the most efficient way. I.e. on single
+// core devices, this is an empty function that exits immediately.
+// On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+// are needed for correct execution. Always call the kernel helper, even
+// when targeting ARMv5TE.
+//
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
- (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+ // Note: This is a function call, which is also an implicit compiler barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+ __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 prev_value = *ptr;
+ Atomic32 prev_value;
+ int reloop;
do {
- if (!pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
+ // The following is equivalent to:
+ //
+ // prev_value = LDREX(ptr)
+ // reloop = 0
+ // if (prev_value != old_value)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " mov %1, #0\n"
+ " cmp %0, %4\n"
+#ifdef __thumb2__
+ " it eq\n"
+#endif
+ " strexeq %1, %5, [%3]\n"
+ : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(old_value), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
return prev_value;
}
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 value;
+ int reloop;
+ do {
+ // Equivalent to:
+ //
+ // value = LDREX(ptr)
+ // value += increment
+ // reloop = STREX(ptr, value)
+ //
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(increment)
+ : "cc", "memory");
+ } while (reloop);
+ return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ // TODO(digit): Investigate if it's possible to implement this with
+ // a single MemoryBarrier() operation between the LDREX and STREX.
+ // See http://crbug.com/246514
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ int reloop;
+ do {
+ // old_value = LDREX(ptr)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " strex %1, %4, [%3]\n"
+ : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier; there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success and non-zero on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr) {
+ typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+ return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+} // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value)
+ return prev_value;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
old_value = *ptr;
- } while (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
+ } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
return old_value;
}
@@ -86,8 +237,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
- if (pLinuxKernelCmpxchg(old_value, new_value,
- const_cast<Atomic32*>(ptr)) == 0) {
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
// The exchange took place as expected.
return new_value;
}
@@ -98,23 +248,46 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value) {
+ // Always ensure acquire semantics.
+ MemoryBarrier();
+ return prev_value;
+ }
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ // This could be implemented as:
+ // MemoryBarrier();
+ // return NoBarrier_CompareAndSwap();
+ //
+  // But that would use 3 barriers per successful CAS. To avoid the extra
+  // barrier, use Acquire_CompareAndSwap(). Its implementation guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+ // - An early return due to (prev_value != old_value) performs
+ // a memory barrier with no store, which is equivalent to the
+ // generic implementation above.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
+#else
+# error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed in case of 32-bit alignment of |ptr| values.
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline void MemoryBarrier() {
- pLinuxKernelMemoryBarrier();
-}
-
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
@@ -125,9 +298,7 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
diff --git a/deps/v8/src/atomicops_internals_atomicword_compat.h b/deps/v8/src/atomicops_internals_atomicword_compat.h
new file mode 100644
index 000000000..5934f7068
--- /dev/null
+++ b/deps/v8/src/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace internal {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::internal::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::internal::Release_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::internal::Acquire_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::internal::Release_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return v8::internal::Acquire_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return v8::internal::Release_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} } // namespace v8::internal
+
+#endif // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
diff --git a/deps/v8/src/atomicops_internals_x86_macosx.h b/deps/v8/src/atomicops_internals_mac.h
index bfb02b385..4bd0c09bd 100644
--- a/deps/v8/src/atomicops_internals_x86_macosx.h
+++ b/deps/v8/src/atomicops_internals_mac.h
@@ -27,8 +27,8 @@
// This file is an internal atomic implementation, use atomicops.h instead.
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_ATOMICOPS_INTERNALS_MAC_H_
#include <libkern/OSAtomic.h>
@@ -65,7 +65,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
+ Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@@ -132,7 +132,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 prev_value;
do {
if (OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
+ reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -146,18 +146,19 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
do {
old_value = *ptr;
} while (!OSAtomicCompareAndSwap64(old_value, new_value,
- const_cast<Atomic64*>(ptr)));
+ reinterpret_cast<volatile int64_t*>(ptr)));
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+ return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+ return OSAtomicAdd64Barrier(increment,
+ reinterpret_cast<volatile int64_t*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@@ -165,8 +166,8 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 prev_value;
do {
- if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
- const_cast<Atomic64*>(ptr))) {
+ if (OSAtomicCompareAndSwap64Barrier(
+ old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
return old_value;
}
prev_value = *ptr;
@@ -213,89 +214,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size. We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return NoBarrier_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
- AtomicWord new_value) {
- return NoBarrier_AtomicExchange(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return NoBarrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
- AtomicWord increment) {
- return Barrier_AtomicIncrement(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Acquire_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
- AtomicWord old_value,
- AtomicWord new_value) {
- return v8::internal::Release_CompareAndSwap(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr),
- old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
- NoBarrier_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Acquire_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::internal::Release_Store(
- reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
- return NoBarrier_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Acquire_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::internal::Release_Load(
- reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
} } // namespace v8::internal
-#endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#endif // V8_ATOMICOPS_INTERNALS_MAC_H_
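The Mac changes above swap const_cast<Atomic64*> for reinterpret_cast<volatile int64_t*> because the libkern OSAtomic 64-bit calls are declared against volatile int64_t*, while Atomic64 is V8's own typedef of the same width. A compressed sketch of the compare-and-swap retry loop with that cast, assuming a macOS toolchain (illustrative only; OSAtomicCompareAndSwap64 is the real libkern call, everything else is simplified):

    #include <libkern/OSAtomic.h>
    #include <stdint.h>

    typedef int64_t Atomic64;

    // Retry until the CAS succeeds, or until we observe a value other than
    // old_value; either way, return the value the location held beforehand.
    inline Atomic64 NoBarrier_CompareAndSwap64(volatile Atomic64* ptr,
                                               Atomic64 old_value,
                                               Atomic64 new_value) {
      Atomic64 prev_value;
      do {
        if (OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr))) {
          return old_value;  // the swap happened
        }
        prev_value = *ptr;   // the swap failed; report what was there instead
      } while (prev_value == old_value);
      return prev_value;
    }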
diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h
index b5162bad9..1819798a5 100644
--- a/deps/v8/src/atomicops_internals_tsan.h
+++ b/deps/v8/src/atomicops_internals_tsan.h
@@ -53,10 +53,7 @@ extern struct AtomicOps_x86CPUFeatureStruct
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-#ifdef __cplusplus
extern "C" {
-#endif
-
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
@@ -80,152 +77,149 @@ typedef enum {
__tsan_memory_order_seq_cst,
} __tsan_memory_order;
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
__tsan_memory_order mo);
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
__tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
__tsan_atomic128 v, __tsan_memory_order mo);
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
- __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
} // extern "C"
-#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -234,37 +228,37 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -273,7 +267,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
@@ -282,33 +276,33 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
return cmp;
}
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -317,60 +311,60 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
@@ -379,7 +373,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
return cmp;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
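Under TSAN, Release_Store and Acquire_Load map onto __tsan_memory_order_release and __tsan_memory_order_acquire, the pair needed for the usual publish pattern. The same ordering expressed with standard C++11 atomics, purely to illustrate the semantics these annotations encode (not V8 code):

    #include <atomic>
    #include <cassert>
    #include <thread>

    static int payload = 0;
    static std::atomic<int> ready(0);

    void producer() {
      payload = 42;                               // plain store
      ready.store(1, std::memory_order_release);  // Release_Store equivalent
    }

    void consumer() {
      while (ready.load(std::memory_order_acquire) == 0) {  // Acquire_Load
      }
      assert(payload == 42);  // the acquire load makes the payload visible
    }

    int main() {
      std::thread t1(producer), t2(consumer);
      t1.join();
      t2.join();
      return 0;
    }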
diff --git a/deps/v8/src/atomicops_internals_x86_msvc.h b/deps/v8/src/atomicops_internals_x86_msvc.h
index fcf6a6510..ad9cf9d80 100644
--- a/deps/v8/src/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/atomicops_internals_x86_msvc.h
@@ -33,6 +33,15 @@
#include "checks.h"
#include "win32-headers.h"
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
namespace v8 {
namespace internal {
@@ -70,8 +79,13 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+ // See #undef and note at the top of this file.
+ __faststorefence();
+#else
// We use MemoryBarrier from WinNT.h
::MemoryBarrier();
+#endif
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
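The x64 change above #undefs the windows.h MemoryBarrier macro and calls __faststorefence() directly; either way, the function's job is a full fence that keeps an earlier store from being reordered past a later load. A portable illustration of that guarantee using C++11 fences (a sketch of the semantics, not the MSVC code path):

    #include <atomic>

    std::atomic<int> x(0), y(0);

    int thread_a() {
      x.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the "MemoryBarrier"
      return y.load(std::memory_order_relaxed);
    }

    int thread_b() {
      y.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return x.load(std::memory_order_relaxed);
    }
    // With both fences in place, thread_a() and thread_b() cannot both return 0
    // when run concurrently; drop either fence and that outcome becomes possible.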
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ef802ba98..c4d7adfbb 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -88,6 +88,8 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
source.length());
Handle<String> source_code =
isolate_->factory()->NewExternalStringFromAscii(resource);
+ // We do not expect this to throw an exception. Change this if it does.
+ CHECK_NOT_EMPTY_HANDLE(isolate_, source_code);
heap->natives_source_cache()->set(index, *source_code);
}
Handle<Object> cached_source(heap->natives_source_cache()->get(index),
@@ -152,7 +154,7 @@ char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down_ != NULL) {
int len = delete_these_non_arrays_on_tear_down_->length();
- ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
+ ASSERT(len < 24); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down_->at(i);
delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@@ -231,6 +233,7 @@ class Genesis BASE_EMBEDDED {
// Installs the contents of the native .js files on the global objects.
// Used for creating a context from scratch.
void InstallNativeFunctions();
+ void InstallExperimentalBuiltinFunctionIds();
void InstallExperimentalNativeFunctions();
Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
const char* name,
@@ -299,7 +302,7 @@ class Genesis BASE_EMBEDDED {
PrototypePropertyMode prototypeMode);
void MakeFunctionInstancePrototypeWritable();
- Handle<Map> CreateStrictModeFunctionMap(
+ Handle<Map> CreateStrictFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function);
@@ -327,8 +330,8 @@ class Genesis BASE_EMBEDDED {
// prototype for the processing of JS builtins. Later the function maps are
// replaced in order to make prototype writable. These are the final, writable
// prototype, maps.
- Handle<Map> function_map_writable_prototype_;
- Handle<Map> strict_mode_function_map_writable_prototype_;
+ Handle<Map> sloppy_function_map_writable_prototype_;
+ Handle<Map> strict_function_map_writable_prototype_;
Handle<JSFunction> throw_type_error_function;
BootstrapperActive active_;
@@ -473,18 +476,19 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
CreateFunctionMap(DONT_ADD_PROTOTYPE);
- native_context()->set_function_without_prototype_map(
+ native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
// Allocate the function map. This map is temporary, used only for processing
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
- native_context()->set_function_map(*function_map);
+ native_context()->set_sloppy_function_map(*function_map);
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- function_map_writable_prototype_ = CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+ sloppy_function_map_writable_prototype_ =
+ CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
@@ -518,7 +522,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<String> empty_string =
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
+ factory->NewFunctionWithoutPrototype(empty_string, SLOPPY);
// --- E m p t y ---
Handle<Code> code =
@@ -536,10 +540,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
- native_context()->function_map()->set_prototype(*empty_function);
- native_context()->function_without_prototype_map()->
+ native_context()->sloppy_function_map()->set_prototype(*empty_function);
+ native_context()->sloppy_function_without_prototype_map()->
set_prototype(*empty_function);
- function_map_writable_prototype_->set_prototype(*empty_function);
+ sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
@@ -603,11 +607,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
Handle<String> name = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ThrowTypeError"));
throw_type_error_function =
- factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
+ factory()->NewFunctionWithoutPrototype(name, SLOPPY);
Handle<Code> code(isolate()->builtins()->builtin(
Builtins::kStrictModePoisonPill));
- throw_type_error_function->set_map(
- native_context()->function_map());
+ throw_type_error_function->set_map(native_context()->sloppy_function_map());
throw_type_error_function->set_code(*code);
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
@@ -618,7 +621,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
}
-Handle<Map> Genesis::CreateStrictModeFunctionMap(
+Handle<Map> Genesis::CreateStrictFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
@@ -631,28 +634,27 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
- Handle<Map> strict_mode_function_without_prototype_map =
- CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_without_prototype_map(
- *strict_mode_function_without_prototype_map);
+ Handle<Map> strict_function_without_prototype_map =
+ CreateStrictFunctionMap(DONT_ADD_PROTOTYPE, empty);
+ native_context()->set_strict_function_without_prototype_map(
+ *strict_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
// only for processing of builtins.
// Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_mode_function_map =
- CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map);
+ Handle<Map> strict_function_map =
+ CreateStrictFunctionMap(ADD_READONLY_PROTOTYPE, empty);
+ native_context()->set_strict_function_map(*strict_function_map);
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_mode_function_map_writable_prototype_ =
- CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
+ strict_function_map_writable_prototype_ =
+ CreateStrictFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
// Complete the callbacks.
- PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
- PoisonArgumentsAndCaller(strict_mode_function_map);
- PoisonArgumentsAndCaller(strict_mode_function_map_writable_prototype_);
+ PoisonArgumentsAndCaller(strict_function_without_prototype_map);
+ PoisonArgumentsAndCaller(strict_function_map);
+ PoisonArgumentsAndCaller(strict_function_map_writable_prototype_);
}
@@ -1097,7 +1099,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
Handle<JSFunction> fun = InstallTypedArray(#Type "Array", \
- EXTERNAL_##TYPE##_ELEMENTS); \
+ TYPE##_ELEMENTS); \
native_context()->set_##type##_array_fun(*fun); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
@@ -1112,6 +1114,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_data_view_fun(*data_view_fun);
}
+ { // -- W e a k M a p
+ InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
+
+ { // -- W e a k S e t
+ InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ }
+
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
@@ -1136,7 +1150,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
- native_context()->set_arguments_boilerplate(*result);
+ native_context()->set_sloppy_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1172,22 +1186,23 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- aliased_arguments_boilerplate_
// Set up a well-formed parameter map to make assertions happy.
Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set_map(heap->non_strict_arguments_elements_map());
+ elements->set_map(heap->sloppy_arguments_elements_map());
Handle<FixedArray> array;
array = factory->NewFixedArray(0);
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
- Handle<Map> old_map(native_context()->arguments_boilerplate()->map());
+ Handle<Map> old_map(
+ native_context()->sloppy_arguments_boilerplate()->map());
Handle<Map> new_map = factory->CopyMap(old_map);
new_map->set_pre_allocated_property_fields(2);
Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
// Set elements kind after allocating the object because
// NewJSObjectFromMap assumes a fast elements map.
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
result->set_elements(*elements);
- ASSERT(result->HasNonStrictArgumentsElements());
+ ASSERT(result->HasSloppyArgumentsElements());
native_context()->set_aliased_arguments_boilerplate(*result);
}
@@ -1210,7 +1225,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kArgumentsObjectSizeStrict);
+ Heap::kStrictArgumentsObjectSize);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*descriptors);
@@ -1239,13 +1254,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
- // Copy constructor from the non-strict arguments boilerplate.
+ // Copy constructor from the sloppy arguments boilerplate.
map->set_constructor(
- native_context()->arguments_boilerplate()->map()->constructor());
+ native_context()->sloppy_arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
Handle<JSObject> result = factory->NewJSObjectFromMap(map);
- native_context()->set_strict_mode_arguments_boilerplate(*result);
+ native_context()->set_strict_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
@@ -1309,9 +1324,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
delegate->shared()->DontAdaptArguments();
}
- // Initialize the out of memory slot.
- native_context()->set_out_of_memory(heap->false_value());
-
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@@ -1349,23 +1361,13 @@ void Genesis::InitializeExperimentalGlobal() {
}
if (FLAG_harmony_collections) {
- { // -- S e t
- InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
{ // -- M a p
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
- { // -- W e a k M a p
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- }
- { // -- W e a k S e t
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ { // -- S e t
+ InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
@@ -1388,18 +1390,19 @@ void Genesis::InitializeExperimentalGlobal() {
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
- Handle<Map> function_map(native_context()->function_map());
+ Handle<Map> function_map(native_context()->sloppy_function_map());
Handle<Map> generator_function_map = factory()->CopyMap(function_map);
generator_function_map->set_prototype(*generator_function_prototype);
- native_context()->set_generator_function_map(*generator_function_map);
+ native_context()->set_sloppy_generator_function_map(
+ *generator_function_map);
Handle<Map> strict_mode_function_map(
- native_context()->strict_mode_function_map());
+ native_context()->strict_function_map());
Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
strict_mode_function_map);
strict_mode_generator_function_map->set_prototype(
*generator_function_prototype);
- native_context()->set_strict_mode_generator_function_map(
+ native_context()->set_strict_generator_function_map(
*strict_mode_generator_function_map);
Handle<Map> object_map(native_context()->object_function()->initial_map());
@@ -1461,6 +1464,7 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<String> source_code =
factory->NewStringFromAscii(
ExperimentalNatives::GetRawScriptSource(index));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, source_code, false);
return CompileNative(isolate, name, source_code);
}
@@ -1510,6 +1514,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
+ ASSERT(!script_name.is_null());
function_info = Compiler::CompileScript(
source,
script_name,
@@ -1519,7 +1524,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
top_context,
extension,
NULL,
- Handle<String>::null(),
+ NO_CACHED_DATA,
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
@@ -1562,6 +1567,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
@@ -1569,6 +1575,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1577,25 +1584,34 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
+
+ INSTALL_NATIVE(JSFunction, "IsPromise", is_promise);
+ INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
+ INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
+ INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
+ INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain);
+ INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch);
+
+ INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "EndPerformSplice",
+ observers_end_perform_splice);
}
void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
+ INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask",
+ enqueue_external_microtask);
+
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
- if (FLAG_harmony_observation) {
- INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
- INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
- observers_begin_perform_splice);
- INSTALL_NATIVE(JSFunction, "EndPerformSplice",
- observers_end_perform_splice);
- }
}
#undef INSTALL_NATIVE
@@ -1751,9 +1767,6 @@ bool Genesis::InstallNatives() {
STATIC_ASCII_VECTOR("column_offset")));
Handle<Foreign> script_column_offset(
factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_string(factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("data")));
- Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
Handle<String> type_string(factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("type")));
Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
@@ -1818,11 +1831,6 @@ bool Genesis::InstallNatives() {
}
{
- CallbacksDescriptor d(*data_string, *script_data, attribs);
- script_map->AppendDescriptor(&d, witness);
- }
-
- {
CallbacksDescriptor d(*type_string, *script_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
@@ -2047,8 +2055,6 @@ bool Genesis::InstallExperimentalNatives() {
INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, observation, "object-observe.js")
- INSTALL_EXPERIMENTAL_NATIVE(i, promises, "promise.js")
INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
@@ -2057,7 +2063,7 @@ bool Genesis::InstallExperimentalNatives() {
}
InstallExperimentalNativeFunctions();
-
+ InstallExperimentalBuiltinFunctionIds();
return true;
}
@@ -2076,8 +2082,10 @@ static Handle<JSObject> ResolveBuiltinIdHolder(
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
+ Handle<String> property_string = factory->InternalizeUtf8String(property);
+ ASSERT(!property_string.is_null());
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
+ GetProperty(isolate, global, property_string));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
@@ -2107,6 +2115,15 @@ void Genesis::InstallBuiltinFunctionIds() {
}
+void Genesis::InstallExperimentalBuiltinFunctionIds() {
+ HandleScope scope(isolate());
+ if (FLAG_harmony_maths) {
+ Handle<JSObject> holder = ResolveBuiltinIdHolder(native_context(), "Math");
+ InstallBuiltinFunctionId(holder, "clz32", kMathClz32);
+ }
+}
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
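kMathClz32, wired up above for --harmony-maths, backs Math.clz32: it returns the number of leading zero bits in the 32-bit representation of its argument, with clz32(0) defined as 32. A plain reference sketch of that computation (not V8's optimized lowering):

    #include <cassert>
    #include <cstdint>

    // Count leading zeros of a 32-bit value; clz32(0) is defined as 32.
    int clz32(uint32_t x) {
      int n = 0;
      while (n < 32 && (x & 0x80000000u) == 0) {
        x <<= 1;
        ++n;
      }
      return n;
    }

    int main() {
      assert(clz32(0) == 32);
      assert(clz32(1) == 31);
      assert(clz32(0x80000000u) == 0);
      return 0;
    }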
@@ -2336,6 +2353,8 @@ bool Genesis::InstallExtension(Isolate* isolate,
}
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
+ // We do not expect this to throw an exception. Change this if it does.
+ CHECK_NOT_EMPTY_HANDLE(isolate, source_code);
bool result = CompileScriptCached(isolate,
CStrVector(extension->name()),
source_code,
@@ -2546,13 +2565,14 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
// The maps with writable prototype are created in CreateEmptyFunction
// and CreateStrictModeFunctionMaps respectively. Initially the maps are
// created with read-only prototype for JS builtins processing.
- ASSERT(!function_map_writable_prototype_.is_null());
- ASSERT(!strict_mode_function_map_writable_prototype_.is_null());
+ ASSERT(!sloppy_function_map_writable_prototype_.is_null());
+ ASSERT(!strict_function_map_writable_prototype_.is_null());
// Replace function instance maps to make prototype writable.
- native_context()->set_function_map(*function_map_writable_prototype_);
- native_context()->set_strict_mode_function_map(
- *strict_mode_function_map_writable_prototype_);
+ native_context()->set_sloppy_function_map(
+ *sloppy_function_map_writable_prototype_);
+ native_context()->set_strict_function_map(
+ *strict_function_map_writable_prototype_);
}
@@ -2566,7 +2586,9 @@ class NoTrackDoubleFieldsForSerializerScope {
}
}
~NoTrackDoubleFieldsForSerializerScope() {
- FLAG_track_double_fields = flag_;
+ if (Serializer::enabled()) {
+ FLAG_track_double_fields = flag_;
+ }
}
private:
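The final bootstrapper hunk makes the destructor of NoTrackDoubleFieldsForSerializerScope restore FLAG_track_double_fields only when the serializer is enabled, presumably mirroring a constructor that only overrides the flag in that case. The general save-in-constructor, conditionally-restore-in-destructor shape, reduced to a standalone guard (names below are placeholders, not V8's):

    // Illustrative RAII guard: clears a global flag for the lifetime of the
    // scope, but only when some condition held at construction time.
    static bool g_condition = true;  // stands in for Serializer::enabled()
    static bool g_flag = true;       // stands in for FLAG_track_double_fields

    class ScopedFlagOverride {
     public:
      ScopedFlagOverride() : saved_(g_flag) {
        if (g_condition) g_flag = false;   // override only when it matters
      }
      ~ScopedFlagOverride() {
        if (g_condition) g_flag = saved_;  // restore only what was changed
      }
     private:
      bool saved_;
    };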
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 14dd1bd99..e683a45f0 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -73,6 +73,7 @@ class SourceCodeCache BASE_EMBEDDED {
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
Handle<String> str = factory->NewStringFromAscii(name, TENURED);
+ ASSERT(!str.is_null());
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index e68890fcb..689e845ba 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -268,13 +268,12 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * entry_size;
- if (heap->marking()->TransferMark(elms->address(),
- elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
+ Address new_start = elms->address() + size_delta;
+ heap->marking()->TransferMark(elms->address(), new_start);
+ heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
- FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + size_delta));
+ FixedArrayBase* new_elms =
+ FixedArrayBase::cast(HeapObject::FromAddress(new_start));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
if (profiler->is_tracking_object_moves()) {
profiler->ObjectMoveEvent(elms->address(),
@@ -301,33 +300,35 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
}
+// Returns empty handle if not applicable.
MUST_USE_RESULT
-static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
- Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
- if (!receiver->IsJSArray()) return NULL;
- JSArray* array = JSArray::cast(receiver);
- if (array->map()->is_observed()) return NULL;
- if (!array->map()->is_extensible()) return NULL;
- HeapObject* elms = array->elements();
+static inline Handle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
+ Isolate* isolate,
+ Handle<Object> receiver,
+ Arguments* args,
+ int first_added_arg) {
+ if (!receiver->IsJSArray()) return Handle<FixedArrayBase>::null();
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (array->map()->is_observed()) return Handle<FixedArrayBase>::null();
+ if (!array->map()->is_extensible()) return Handle<FixedArrayBase>::null();
+ Handle<FixedArrayBase> elms(array->elements());
+ Heap* heap = isolate->heap();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_cow_array_map()) {
- MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
- if (args == NULL || array->HasFastObjectElements() ||
- !maybe_writable_result->To(&elms)) {
- return maybe_writable_result;
- }
+ elms = JSObject::EnsureWritableFastElements(array);
+ if (args == NULL || array->HasFastObjectElements()) return elms;
} else if (map == heap->fixed_double_array_map()) {
if (args == NULL) return elms;
} else {
- return NULL;
+ return Handle<FixedArrayBase>::null();
}
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) return array->elements();
+ if (first_added_arg >= args_length) return handle(array->elements());
ElementsKind origin_kind = array->map()->elements_kind();
ASSERT(!IsFastObjectElementsKind(origin_kind));
@@ -346,14 +347,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
}
if (target_kind != origin_kind) {
- MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return array->elements();
+ JSObject::TransitionElementsKind(array, target_kind);
+ return handle(array->elements());
}
return elms;
}
+// TODO(ishell): Handlify when all Array* builtins are handlified.
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
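The handlified EnsureJSArrayWithWritableFastElements reports "not applicable, fall back to the JS builtin" by returning Handle<FixedArrayBase>::null(), and callers test is_null() instead of comparing a MaybeObject* against NULL. A stripped-down sketch of that calling convention (the Handle class below is a simplified stand-in, not V8's):

    #include <cstdio>

    // Minimal stand-in for a handle: wraps a pointer and knows when it is empty.
    template <typename T>
    class Handle {
     public:
      Handle() : location_(nullptr) {}
      explicit Handle(T* location) : location_(location) {}
      static Handle<T> null() { return Handle<T>(); }
      bool is_null() const { return location_ == nullptr; }
      T* operator*() const { return location_; }
     private:
      T* location_;
    };

    struct FixedArrayBase { int length; };

    // Returns an empty handle when the fast path does not apply.
    Handle<FixedArrayBase> EnsureWritableFastElements(bool fast_path_ok,
                                                      FixedArrayBase* elements) {
      if (!fast_path_ok) return Handle<FixedArrayBase>::null();
      return Handle<FixedArrayBase>(elements);
    }

    int main() {
      FixedArrayBase backing = {4};
      Handle<FixedArrayBase> elms = EnsureWritableFastElements(true, &backing);
      if (elms.is_null()) {
        std::printf("fall back to the generic JS builtin\n");
      } else {
        std::printf("fast path, length %d\n", (*elms)->length);
      }
      return 0;
    }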
@@ -393,23 +394,19 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
+ if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPush", args);
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
ElementsKind kind = array->GetElementsKind();
if (IsFastSmiOrObjectElementsKind(kind)) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -425,16 +422,13 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
elms = new_elms;
}
@@ -446,8 +440,8 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
- if (elms != array->elements()) {
- array->set_elements(elms);
+ if (*elms != array->elements()) {
+ array->set_elements(*elms);
}
// Set the length.
@@ -467,25 +461,22 @@ BUILTIN(ArrayPush) {
int new_length = len + to_add;
- FixedDoubleArray* new_elms;
+ Handle<FixedDoubleArray> new_elms;
if (new_length > elms_len) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ new_elms = isolate->factory()->NewFixedDoubleArray(capacity);
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+
} else {
// to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
// empty_fixed_array.
- new_elms = FixedDoubleArray::cast(elms_obj);
+ new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
}
// Add the provided values.
@@ -496,8 +487,8 @@ BUILTIN(ArrayPush) {
new_elms->set(index + len, arg->Number());
}
- if (new_elms != array->elements()) {
- array->set_elements(new_elms);
+ if (*new_elms != array->elements()) {
+ array->set_elements(*new_elms);
}
// Set the length.
@@ -507,51 +498,62 @@ BUILTIN(ArrayPush) {
}
+// TODO(ishell): Temporary wrapper until handlified.
+static bool ElementsAccessorHasElementWrapper(
+ ElementsAccessor* accessor,
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store = Handle<FixedArrayBase>::null()) {
+ return accessor->HasElement(*receiver, *holder, key,
+ backing_store.is_null() ? NULL : *backing_store);
+}
+
+
BUILTIN(ArrayPop) {
- Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- JSArray* array = JSArray::cast(receiver);
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null()) return CallJsBuiltin(isolate, "ArrayPop", args);
+
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
+ if (len == 0) return isolate->heap()->undefined_value();
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
- MaybeObject* maybe_result;
- if (accessor->HasElement(array, array, new_length, elms_obj)) {
- maybe_result = accessor->Get(array, array, new_length, elms_obj);
+ Handle<Object> element;
+ if (ElementsAccessorHasElementWrapper(
+ accessor, array, array, new_length, elms_obj)) {
+ element = accessor->Get(
+ array, array, new_length, elms_obj);
} else {
- maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
+ Handle<Object> proto(array->GetPrototype(), isolate);
+ element = Object::GetElement(isolate, proto, len - 1);
}
- if (maybe_result->IsFailure()) return maybe_result;
- MaybeObject* maybe_failure =
- accessor->SetLength(array, Smi::FromInt(new_length));
- if (maybe_failure->IsFailure()) return maybe_failure;
- return maybe_result;
+ RETURN_IF_EMPTY_HANDLE(isolate, element);
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ accessor->SetLength(
+ array, handle(Smi::FromInt(new_length), isolate)));
+ return *element;
}
BUILTIN(ArrayShift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -559,25 +561,24 @@ BUILTIN(ArrayShift) {
// Get first element
ElementsAccessor* accessor = array->GetElementsAccessor();
- Object* first;
- MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
- if (!maybe_first->To(&first)) return maybe_first;
+ Handle<Object> first = accessor->Get(receiver, array, 0, elms_obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, first);
if (first->IsTheHole()) {
- first = heap->undefined_value();
+ first = isolate->factory()->undefined_value();
}
- if (!heap->lo_space()->Contains(elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
+ if (!heap->CanMoveObjectStart(*elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
} else {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, 0, 1, len - 1);
+ heap->MoveElements(*elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
elms->set_the_hole(len - 1);
}
}
@@ -585,29 +586,27 @@ BUILTIN(ArrayShift) {
// Set the length.
array->set_length(Smi::FromInt(len - 1));
- return first;
+ return *first;
}
BUILTIN(ArrayUnshift) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -616,31 +615,26 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ JSObject::EnsureCanContainElements(array, &args, 1, to_add,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_elms->To(&new_elms)) return maybe_elms;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, to_add,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
elms = new_elms;
- array->set_elements(elms);
+ array->set_elements(*elms);
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, to_add, 0, len);
+ heap->MoveElements(*elms, to_add, 0, len);
}
// Add the provided values.
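
When unshift needs a larger backing store, the capacity computed above grows to roughly 1.5x the new length plus a small constant, so repeated growth stays amortized constant per element. A quick standalone check of the formula; the helper name NewCapacity is made up for illustration:

#include <cstdio>

// The growth formula from the hunk above: new_length + (new_length >> 1) + 16.
static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

int main() {
  const int lengths[] = {0, 4, 100, 1000};
  for (int len : lengths) {
    std::printf("new_length=%4d -> capacity=%d\n", len, NewCapacity(len));
  }
  return 0;
}
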
@@ -657,18 +651,19 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms;
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms;
int len = -1;
if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ if (!IsJSArrayFastElementMovingAllowed(heap, *array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (array->HasFastElements()) {
- elms = array->elements();
+ elms = handle(array->elements());
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -677,33 +672,34 @@ BUILTIN(ArraySlice) {
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->native_context()->arguments_boilerplate()->map();
+ Handle<Map> arguments_map(isolate->context()->native_context()->
+ sloppy_arguments_boilerplate()->map());
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() &&
- JSObject::cast(receiver)->map() == arguments_map;
+ Handle<JSObject>::cast(receiver)->map() == *arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- JSObject* object = JSObject::cast(receiver);
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
if (object->HasFastElements()) {
- elms = object->elements();
+ elms = handle(object->elements());
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ Handle<Object> len_obj(
+ object->InObjectPropertyAt(Heap::kArgumentsLengthIndex), isolate);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- len = Smi::cast(len_obj)->value();
+ len = Handle<Smi>::cast(len_obj)->value();
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
- JSObject* object = JSObject::cast(receiver);
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -714,11 +710,11 @@ BUILTIN(ArraySlice) {
int relative_start = 0;
int relative_end = len;
if (n_arguments > 0) {
- Object* arg1 = args[1];
+ Handle<Object> arg1 = args.at<Object>(1);
if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
+ relative_start = Handle<Smi>::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
+ double start = Handle<HeapNumber>::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -727,11 +723,11 @@ BUILTIN(ArraySlice) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (n_arguments > 1) {
- Object* arg2 = args[2];
+ Handle<Object> arg2 = args.at<Object>(2);
if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
+ relative_end = Handle<Smi>::cast(arg2)->value();
} else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
+ double end = Handle<HeapNumber>::cast(arg2)->value();
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -758,7 +754,8 @@ BUILTIN(ArraySlice) {
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = k; i < final; i++) {
- if (!accessor->HasElement(object, object, i, elms)) {
+ if (!ElementsAccessorHasElementWrapper(
+ accessor, object, object, i, elms)) {
packed = false;
break;
}
@@ -770,40 +767,31 @@ BUILTIN(ArraySlice) {
}
}
- JSArray* result_array;
- MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
- result_len,
- result_len);
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(kind, result_len, result_len);
DisallowHeapAllocation no_gc;
- if (result_len == 0) return maybe_array;
- if (!maybe_array->To(&result_array)) return maybe_array;
+ if (result_len == 0) return *result_array;
ElementsAccessor* accessor = object->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, k, kind, result_array->elements(), 0, result_len, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- return result_array;
+ accessor->CopyElements(Handle<JSObject>::null(), k, kind,
+ handle(result_array->elements()), 0, result_len, elms);
+ return *result_array;
}
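
The Smi/HeapNumber argument handling above feeds the usual Array.prototype.slice index normalization (ES5 15.4.4.10): negative indices count back from the end and both ends are clamped to [0, len]. That normalization itself sits outside the hunks shown; a hedged standalone version for reference:

#include <algorithm>
#include <cstdio>

// Normalization assumed by the builtin: negative indices are relative to
// the end of the array, and the result is clamped to the range [0, len].
static int NormalizeSliceIndex(double relative, int len) {
  if (relative < 0) return std::max(len + static_cast<int>(relative), 0);
  return std::min(static_cast<int>(relative), len);
}

int main() {
  const int len = 10;
  std::printf("start for slice(-3): %d\n", NormalizeSliceIndex(-3, len));    // 7
  std::printf("start for slice(42): %d\n", NormalizeSliceIndex(42, len));    // 10
  std::printf("end for slice(2, -1): %d\n", NormalizeSliceIndex(-1, len));   // 9
  return 0;
}
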
BUILTIN(ArraySplice) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms == NULL) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ Handle<Object> receiver = args.receiver();
+ Handle<FixedArrayBase> elms_obj =
+ EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
+ if (elms_obj.is_null() ||
+ !IsJSArrayFastElementMovingAllowed(heap,
+ *Handle<JSArray>::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- JSArray* array = JSArray::cast(receiver);
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -812,11 +800,11 @@ BUILTIN(ArraySplice) {
int relative_start = 0;
if (n_arguments > 0) {
- Object* arg1 = args[1];
+ Handle<Object> arg1 = args.at<Object>(1);
if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
+ relative_start = Handle<Smi>::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
+ double start = Handle<HeapNumber>::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -861,72 +849,83 @@ BUILTIN(ArraySplice) {
}
if (new_length == 0) {
- MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);
- if (maybe_array->IsFailure()) return maybe_array;
array->set_elements(heap->empty_fixed_array());
array->set_length(Smi::FromInt(0));
- return maybe_array;
+ return *result;
}
- JSArray* result_array = NULL;
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- actual_delete_count,
- actual_delete_count);
- if (!maybe_array->To(&result_array)) return maybe_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ actual_delete_count,
+ actual_delete_count);
if (actual_delete_count > 0) {
DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start, elements_kind, result_array->elements(),
- 0, actual_delete_count, elms_obj);
- // Cannot fail since the origin and target array are of the same elements
- // kind.
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), actual_start, elements_kind,
+ handle(result_array->elements()), 0, actual_delete_count, elms_obj);
}
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
+ const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, delta, 0, actual_start);
+ heap->MoveElements(*elms, delta, 0, actual_start);
}
- elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
-
+ if (heap->CanMoveObjectStart(*elms_obj)) {
+ // On the fast path we move the start of the object in memory.
+ elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
+ } else {
+ // This is the slow path. We are going to move the elements to the left
+ // by copying them. For trimmed values we store the hole.
+ if (elms_obj->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
+ FillWithHoles(*elms, len - delta, len);
+ } else {
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*elms, 0, delta, len - delta);
+ FillWithHoles(heap, *elms, len - delta, len);
+ }
+ }
elms_changed = true;
} else {
if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, actual_start + item_count,
+ *elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
+ FillWithHoles(*elms, new_length, len);
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ FillWithHoles(heap, *elms, new_length, len);
}
}
} else if (item_count > actual_delete_count) {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -935,9 +934,8 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
+ Handle<FixedArray> new_elms =
+ isolate->factory()->NewUninitializedFixedArray(capacity);
DisallowHeapAllocation no_gc;
@@ -945,30 +943,26 @@ BUILTIN(ArraySplice) {
ElementsAccessor* accessor = array->GetElementsAccessor();
if (actual_start > 0) {
// Copy the part before actual_start as is.
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, kind, new_elms, 0, actual_start, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ accessor->CopyElements(
+ Handle<JSObject>::null(), 0, kind, new_elms, 0, actual_start, elms);
}
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start + actual_delete_count, kind, new_elms,
- actual_start + item_count,
+ accessor->CopyElements(
+ Handle<JSObject>::null(), actual_start + actual_delete_count, kind,
+ new_elms, actual_start + item_count,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
elms_obj = new_elms;
elms_changed = true;
} else {
DisallowHeapAllocation no_gc;
- heap->MoveElements(elms, actual_start + item_count,
+ heap->MoveElements(*elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
}
}
if (IsFastDoubleElementsKind(elements_kind)) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
for (int k = actual_start; k < actual_start + item_count; k++) {
Object* arg = args[3 + k - actual_start];
if (arg->IsSmi()) {
@@ -978,7 +972,7 @@ BUILTIN(ArraySplice) {
}
}
} else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
@@ -987,21 +981,22 @@ BUILTIN(ArraySplice) {
}
if (elms_changed) {
- array->set_elements(elms_obj);
+ array->set_elements(*elms_obj);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
- return result_array;
+ return *result_array;
}
BUILTIN(ArrayConcat) {
+ HandleScope scope(isolate);
Heap* heap = isolate->heap();
- Context* native_context = isolate->context()->native_context();
- JSObject* array_proto =
- JSObject::cast(native_context->array_function()->prototype());
- if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
+ Handle<Context> native_context(isolate->context()->native_context());
+ Handle<JSObject> array_proto(
+ JSObject::cast(native_context->array_function()->prototype()));
+ if (!ArrayPrototypeHasNoElements(heap, *native_context, *array_proto)) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -1013,13 +1008,13 @@ BUILTIN(ArrayConcat) {
bool has_double = false;
bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
+ Handle<Object> arg = args.at<Object>(i);
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
+ !Handle<JSArray>::cast(arg)->HasFastElements() ||
+ Handle<JSArray>::cast(arg)->GetPrototype() != *array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
+ int len = Smi::cast(Handle<JSArray>::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
@@ -1032,7 +1027,7 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ ElementsKind arg_kind = Handle<JSArray>::cast(arg)->map()->elements_kind();
has_double = has_double || IsFastDoubleElementsKind(arg_kind);
is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
@@ -1048,34 +1043,29 @@ BUILTIN(ArrayConcat) {
ArrayStorageAllocationMode mode =
has_double && IsFastObjectElementsKind(elements_kind)
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- JSArray* result_array;
- // Allocate result.
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len,
- mode);
- if (!maybe_array->To(&result_array)) return maybe_array;
- if (result_len == 0) return result_array;
+ Handle<JSArray> result_array =
+ isolate->factory()->NewJSArray(elements_kind,
+ result_len,
+ result_len,
+ mode);
+ if (result_len == 0) return *result_array;
int j = 0;
- FixedArrayBase* storage = result_array->elements();
+ Handle<FixedArrayBase> storage(result_array->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
- JSArray* array = JSArray::cast(args[i]);
+ Handle<JSArray> array = args.at<JSArray>(i);
int len = Smi::cast(array->length())->value();
ElementsKind from_kind = array->GetElementsKind();
if (len > 0) {
- MaybeObject* maybe_failure =
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
j += len;
}
}
ASSERT(j == result_len);
- return result_array;
+ return *result_array;
}
@@ -1174,7 +1164,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
}
SharedFunctionInfo* shared = function->shared();
- if (shared->is_classic_mode() && !shared->native()) {
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
Object* recv = args[0];
ASSERT(!recv->IsNull());
if (recv->IsUndefined()) {
@@ -1320,9 +1310,7 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(
- masm, Handle<HeapType>::null(),
- LoadStubCompiler::registers()[0], Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
}
@@ -1366,8 +1354,8 @@ static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
}
-static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedLoadIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateSloppyArguments(masm);
}
@@ -1387,18 +1375,17 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetter(
- masm, Handle<HeapType>::null(), Handle<JSFunction>());
+ StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, SLOPPY);
}
static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
+ KeyedStoreIC::GenerateGeneric(masm, STRICT);
}
@@ -1432,8 +1419,8 @@ static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
- KeyedStoreIC::GenerateNonStrictArguments(masm);
+static void Generate_KeyedStoreIC_SloppyArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSloppyArguments(masm);
}
@@ -1599,9 +1586,7 @@ void Builtins::InitBuiltinFunctionTable() {
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
- functions->flags = Code::ComputeFlags( \
- Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
- Code::NORMAL, Code::kind); \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1627,7 +1612,9 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
- union { int force_alignment; byte buffer[8*KB]; } u;
+ // TODO(jbramley): I had to increase the size of this buffer from 8KB because
+ // we can generate a lot of debug code on ARM64.
+ union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
@@ -1650,7 +1637,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
{
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
- AlwaysAllocateScope __scope__;
+ AlwaysAllocateScope __scope__(isolate);
{ MaybeObject* maybe_code =
heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_code->ToObject(&code)) {
@@ -1712,12 +1699,12 @@ const char* Builtins::Lookup(byte* pc) {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index d977a4817..88cfd53f4 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -137,7 +137,7 @@ enum BuiltinExtraArguments {
kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
+ V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, \
kNoExtraICState) \
\
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
@@ -156,8 +156,8 @@ enum BuiltinExtraArguments {
StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
StoreIC::kStrictModeState) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- kNoExtraICState) \
+ V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC, \
+ kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 767ad6513..f52feda6c 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -66,6 +66,27 @@ struct IdentifierPart {
}
};
+
+// WhiteSpace according to ECMA-262 5.1, 7.2.
+struct WhiteSpace {
+ static inline bool Is(uc32 c) {
+ return c == 0x0009 || // <TAB>
+ c == 0x000B || // <VT>
+ c == 0x000C || // <FF>
+ c == 0xFEFF || // <BOM>
+ // \u0020 and \u00A0 are included in unibrow::WhiteSpace.
+ unibrow::WhiteSpace::Is(c);
+ }
+};
+
+
+// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
+struct WhiteSpaceOrLineTerminator {
+ static inline bool Is(uc32 c) {
+ return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
+ }
+};
+
} } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_
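
The new WhiteSpace predicate above enumerates the ECMA-262 5.1 whitespace code points, deferring \u0020, \u00A0 and the rest of the Unicode Zs category to unibrow::WhiteSpace. A self-contained approximation that covers only the explicitly listed code points (general Zs is omitted here):

#include <cstdint>
#include <cstdio>

// Approximation of the WhiteSpace predicate: only the code points named in
// the struct above plus <SP> and <NBSP>; other Unicode Zs characters are
// left out of this sketch.
static bool IsEcmaWhiteSpace(uint32_t c) {
  switch (c) {
    case 0x0009:  // <TAB>
    case 0x000B:  // <VT>
    case 0x000C:  // <FF>
    case 0x0020:  // <SP>
    case 0x00A0:  // <NBSP>
    case 0xFEFF:  // <BOM>
      return true;
    default:
      return false;
  }
}

int main() {
  std::printf("tab: %d, 'a': %d, nbsp: %d\n",
              IsEcmaWhiteSpace(0x0009), IsEcmaWhiteSpace('a'),
              IsEcmaWhiteSpace(0x00A0));
  return 0;
}
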
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
index 62e04ff20..3a2de28a2 100644
--- a/deps/v8/src/checks.cc
+++ b/deps/v8/src/checks.cc
@@ -38,30 +38,34 @@
#include "platform.h"
#include "v8.h"
+namespace v8 {
+namespace internal {
+
+intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
// Attempts to dump a backtrace (if supported).
-static V8_INLINE void DumpBacktrace() {
+void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
- i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
- i::OS::PrintError("(empty)\n");
+ OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
- i::OS::PrintError("(no symbols)\n");
+ OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
- i::OS::PrintError("%2d: ", i);
+ OS::PrintError("%2d: ", i);
char mangled[201];
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
free(demangled);
} else {
- i::OS::PrintError("??\n");
+ OS::PrintError("??\n");
}
}
}
@@ -73,22 +77,24 @@ static V8_INLINE void DumpBacktrace() {
bt_init_accessor(&acc, BT_SELF);
bt_load_memmap(&acc, &memmap);
bt_sprn_memmap(&memmap, out, sizeof(out));
- i::OS::PrintError(out);
+ OS::PrintError(out);
bt_addr_t trace[100];
int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
- i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
- i::OS::PrintError("(empty)\n");
+ OS::PrintError("(empty)\n");
} else {
bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
out, sizeof(out), NULL);
- i::OS::PrintError(out);
+ OS::PrintError(out);
}
bt_unload_memmap(&memmap);
bt_release_accessor(&acc);
#endif // V8_LIBC_GLIBC || V8_OS_BSD
}
+} } // namespace v8::internal
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -102,7 +108,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- DumpBacktrace();
+ v8::internal::DumpBacktrace();
fflush(stderr);
i::OS::Abort();
}
@@ -136,10 +142,3 @@ void CheckNonEqualsHelper(const char* file,
unexpected_source, value_source, *value_str);
}
}
-
-
-namespace v8 { namespace internal {
-
- intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index f7b145fc8..e53475a0a 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -34,6 +34,7 @@
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
@@ -51,6 +52,23 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0)
#endif
+// Simulator specific helpers.
+#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
+  // TODO(all): If possible, automatically prepend an indicator such as
+  // UNIMPLEMENTED or LOCATION.
+ #define ASM_UNIMPLEMENTED(message) \
+ __ Debug(message, __LINE__, NO_PARAM)
+ #define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
+ FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+ #define ASM_LOCATION(message) \
+ __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+ #define ASM_UNIMPLEMENTED(message)
+ #define ASM_UNIMPLEMENTED_BREAK(message)
+ #define ASM_LOCATION(message)
+#endif
+
// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
@@ -288,8 +306,12 @@ extern bool FLAG_enable_slow_asserts;
#define SLOW_ASSERT(condition) ((void) 0)
const bool FLAG_enable_slow_asserts = false;
#endif
-} // namespace internal
-} // namespace v8
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} } // namespace v8::internal
// The ASSERT macro is equivalent to CHECK except that it only
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h
index 94bc89e7d..71ef38322 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/circular-queue.h
@@ -28,6 +28,7 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
+#include "atomicops.h"
#include "v8globals.h"
namespace v8 {
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 455d08768..040c26013 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -81,6 +81,11 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
+ HLoadNamedField* BuildLoadNamedField(HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject);
+
enum ArgumentClass {
NONE,
SINGLE,
@@ -93,9 +98,20 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
- void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
- HValue* code_object);
+  // BuildCheckAndInstallOptimizedCode emits code that installs into
+  // js_function the optimized code found in the optimized code map at
+  // map_index, provided the entry at map_index matches the given
+  // native_context. The builder is left in the "Then()" state after the
+  // install.
+ void BuildCheckAndInstallOptimizedCode(HValue* js_function,
+ HValue* native_context,
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
+
+ HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
+ HValue* iterator,
+ int field_offset);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
@@ -247,8 +263,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -530,15 +545,11 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
- // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
- // cell is really a Cell, and so no write barrier is needed.
- // TODO(mvstanton): Add a debug_code check to verify the input cell is really
- // a cell. (perhaps with a new instruction, HAssert).
- HInstruction* cell = GetParameter(0);
- HObjectAccess access = HObjectAccess::ForCellValue();
- store = Add<HStoreNamedField>(cell, access, object);
- store->SkipWriteBarrier();
- return cell;
+ HInstruction* feedback_vector = GetParameter(0);
+ HInstruction* slot = GetParameter(1);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return feedback_vector;
}
@@ -552,7 +563,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@@ -562,14 +573,32 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
}
+HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
+ HValue* object,
+ Representation representation,
+ int offset,
+ bool is_inobject) {
+ HObjectAccess access = is_inobject
+ ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
+ : HObjectAccess::ForBackingStoreOffset(offset, representation);
+ if (representation.IsDouble()) {
+ // Load the heap number.
+ object = Add<HLoadNamedField>(
+ object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
+}
+
+
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- int offset = casted_stub()->offset();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
- HObjectAccess::ForBackingStoreOffset(offset, rep);
- return AddLoadNamedField(GetParameter(0), access);
+ return BuildLoadNamedField(GetParameter(0),
+ casted_stub()->representation(),
+ casted_stub()->offset(),
+ casted_stub()->is_inobject());
}
@@ -579,17 +608,15 @@ Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
template<>
-HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
- Representation rep = casted_stub()->representation();
- int offset = casted_stub()->offset();
- HObjectAccess access = casted_stub()->is_inobject() ?
- HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
- HObjectAccess::ForBackingStoreOffset(offset, rep);
- return AddLoadNamedField(GetParameter(0), access);
+HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
+ HValue* string = BuildLoadNamedField(
+ GetParameter(0), Representation::Tagged(), JSValue::kValueOffset, true);
+ return BuildLoadNamedField(
+ string, Representation::Tagged(), String::kLengthOffset, true);
}
-Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
+Handle<Code> StringLengthStub::GenerateCode(Isolate* isolate) {
return DoGenerateCode(isolate, this);
}
@@ -599,7 +626,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
- true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+ STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@@ -914,7 +941,7 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
// If we encounter a generic argument, the number conversion is
// observable, thus we cannot afford to bail out after the fact.
if (!state.HasSideEffects()) {
- if (result_type->Is(Type::Smi())) {
+ if (result_type->Is(Type::SignedSmall())) {
if (state.op() == Token::SHR) {
// TODO(olivf) Replace this by a SmiTagU Instruction.
// 0x40000000: this number would convert to negative when interpreting
@@ -1033,13 +1060,16 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
- HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
- // Check that the map of the global has not changed: use a placeholder map
- // that will be replaced later with the global object's map.
- Handle<Map> placeholder_map = isolate()->factory()->meta_map();
- Add<HCheckMaps>(receiver, placeholder_map, top_info());
+ if (stub->check_global()) {
+ // Check that the map of the global has not changed: use a placeholder map
+ // that will be replaced later with the global object's map.
+ Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+ HValue* global = Add<HConstant>(
+ StoreGlobalStub::global_placeholder(isolate()));
+ Add<HCheckMaps>(global, placeholder_map, top_info());
+ }
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
@@ -1096,7 +1126,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
+ STORE, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
@@ -1109,10 +1139,27 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
}
-void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
- HValue* code_object) {
+ IfBuilder* builder,
+ HValue* optimized_map,
+ HValue* map_index) {
+ HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
+ HValue* context_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kContextOffset);
+ HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
+ builder->If<HCompareObjectEqAndBranch>(native_context,
+ context_slot);
+ builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
+ builder->Then();
+ HValue* code_object = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kCachedCodeOffset);
+ // and the literals
+ HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
+ map_index, SharedFunctionInfo::kLiteralsOffset);
+
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized());
@@ -1120,6 +1167,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ literals);
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(
@@ -1133,6 +1182,8 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
+
+ // The builder continues in the "then" after this function.
}
@@ -1147,6 +1198,24 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
}
+HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
+ HValue* optimized_map,
+ HValue* iterator,
+ int field_offset) {
+  // By making sure to express these loads in the form [<hvalue> + constant],
+  // the keyed load can be hoisted.
+ ASSERT(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
+ HValue* field_slot = iterator;
+ if (field_offset > 0) {
+ HValue* field_offset_value = Add<HConstant>(field_offset);
+ field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
+ }
+ HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
+ static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ return field_entry;
+}
+
+
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
@@ -1168,28 +1237,19 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
- Label install_optimized;
- HValue* first_context_slot = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstContextSlot());
- HValue* first_osr_ast_slot = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstOsrAstIdSlot());
- HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
+ HValue* first_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kEntriesStart);
IfBuilder already_in(this);
- already_in.If<HCompareObjectEqAndBranch>(native_context,
- first_context_slot);
- already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
- osr_ast_id_none);
- already_in.Then();
- {
- HValue* code_object = Add<HLoadNamedField>(
- optimized_map, static_cast<HValue*>(NULL),
- HObjectAccess::ForFirstCodeSlot());
- BuildInstallOptimizedCode(js_function, native_context, code_object);
- }
+ BuildCheckAndInstallOptimizedCode(js_function, native_context, &already_in,
+ optimized_map, first_entry_index);
already_in.Else();
{
+    // Iterate through the rest of the map backwards. Do not double-check the
+    // first entry. After the loop, if no matching optimized code was found,
+    // install the unoptimized code.
+ // for(i = map.length() - SharedFunctionInfo::kEntryLength;
+ // i > SharedFunctionInfo::kEntriesStart;
+ // i -= SharedFunctionInfo::kEntryLength) { .. }
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this,
@@ -1199,63 +1259,34 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* array_length = Add<HLoadNamedField>(
optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
- HValue* slot_iterator = loop_builder.BeginBody(array_length,
- graph()->GetConstant0(),
- Token::GT);
+ HValue* start_pos = AddUncasted<HSub>(array_length,
+ shared_function_entry_length);
+ HValue* slot_iterator = loop_builder.BeginBody(start_pos,
+ first_entry_index,
+ Token::GT);
{
- // Iterate through the rest of map backwards.
- // Do not double check first entry.
- HValue* second_entry_index =
- Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
- IfBuilder restore_check(this);
- restore_check.If<HCompareNumericAndBranch>(
- slot_iterator, second_entry_index, Token::EQ);
- restore_check.Then();
- {
- // Store the unoptimized code
- BuildInstallCode(js_function, shared_info);
- loop_builder.Break();
- }
- restore_check.Else();
- {
- STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
- SharedFunctionInfo::kOsrAstIdOffset == 1);
- HValue* native_context_slot = AddUncasted<HSub>(
- slot_iterator, shared_function_entry_length);
- HValue* osr_ast_id_slot = AddUncasted<HSub>(
- slot_iterator, graph()->GetConstant1());
- HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
- native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
- osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- IfBuilder done_check(this);
- done_check.If<HCompareObjectEqAndBranch>(native_context,
- native_context_entry);
- done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
- osr_ast_id_none);
- done_check.Then();
- {
- // Hit: fetch the optimized code.
- HValue* code_slot = AddUncasted<HAdd>(
- native_context_slot, graph()->GetConstant1());
- HValue* code_object = Add<HLoadKeyed>(optimized_map,
- code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
- BuildInstallOptimizedCode(js_function, native_context, code_object);
-
- // Fall out of the loop
- loop_builder.Break();
- }
- done_check.Else();
- done_check.End();
- }
- restore_check.End();
+ IfBuilder done_check(this);
+ BuildCheckAndInstallOptimizedCode(js_function, native_context,
+ &done_check,
+ optimized_map,
+ slot_iterator);
+ // Fall out of the loop
+ loop_builder.Break();
}
loop_builder.EndBody();
+
+    // If slot_iterator equals the first entry index, then we failed to find
+    // and install optimized code.
+ IfBuilder no_optimized_code_check(this);
+ no_optimized_code_check.If<HCompareNumericAndBranch>(
+ slot_iterator, first_entry_index, Token::EQ);
+ no_optimized_code_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ }
}
- already_in.End();
}
- is_optimized.End();
}
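
The rewritten loop above walks the optimized code map backwards in kEntryLength strides, comparing each entry's context and OSR ast id and falling back to the unoptimized code when nothing matches. A standalone sketch of that search over a flat array; the entry layout and the kEntriesStart value here are assumptions for illustration, not the real SharedFunctionInfo constants:

#include <cstdio>
#include <vector>

// Assumed flat layout per entry: [context, code, literals, osr_ast_id].
constexpr int kEntryLength = 4;
constexpr int kEntriesStart = 0;  // the real map keeps a small header first

static int FindCodeForContext(const std::vector<int>& map, int context,
                              int osr_none) {
  // Walk backwards one entry at a time, newest entries first.
  for (int i = static_cast<int>(map.size()) - kEntryLength;
       i >= kEntriesStart; i -= kEntryLength) {
    if (map[i] == context && map[i + 3] == osr_none) {
      return map[i + 1];  // cached optimized code for this context
    }
  }
  return -1;  // not found: install the unoptimized code instead
}

int main() {
  std::vector<int> map = {10, 100, 200, 0,   // entry for context 10
                          11, 101, 201, 0};  // entry for context 11
  std::printf("context 11 -> %d\n", FindCodeForContext(map, 11, 0));
  std::printf("context 99 -> %d\n", FindCodeForContext(map, 99, 0));
  return 0;
}
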
@@ -1274,7 +1305,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
casted_stub()->is_generator());
// Compute the function map in the current native context and set that
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index d86bc70dc..06203629a 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -86,9 +86,11 @@ Code::Kind CodeStub::GetCodeKind() const {
}
-Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
+Handle<Code> CodeStub::GetCodeCopy(Isolate* isolate,
+ const Code::FindAndReplacePattern& pattern) {
Handle<Code> ic = GetCode(isolate);
ic = isolate->factory()->CopyCode(ic);
+ ic->FindAndReplace(pattern);
RecordCodeGeneration(*ic, isolate);
return ic;
}
@@ -119,8 +121,7 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(),
- GetStubFlags());
+ GetStubType());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -562,7 +563,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -573,8 +574,8 @@ void ArgumentsAccessStub::PrintName(StringStream* stream) {
stream->Add("ArgumentsAccessStub_");
switch (type_) {
case READ_ELEMENT: stream->Add("ReadElement"); break;
- case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
- case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+ case NEW_SLOPPY_FAST: stream->Add("NewSloppyFast"); break;
+ case NEW_SLOPPY_SLOW: stream->Add("NewSloppySlow"); break;
case NEW_STRICT: stream->Add("NewStrict"); break;
}
}
@@ -737,7 +738,7 @@ void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
- FastNewClosureStub stub(STRICT_MODE, false);
+ FastNewClosureStub stub(STRICT, false);
InstallDescriptor(isolate, &stub);
}
@@ -749,6 +750,14 @@ void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
// static
+void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
+ FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE, 0);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
InstallDescriptor(isolate, &stub);
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 8d283d9e3..5a8894233 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -51,9 +51,7 @@ namespace internal {
V(CompareIC) \
V(CompareNilIC) \
V(MathPow) \
- V(StringLength) \
V(FunctionPrototype) \
- V(StoreArrayLength) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
@@ -98,9 +96,11 @@ namespace internal {
V(CallApiGetter) \
/* IC Handler stubs */ \
V(LoadField) \
- V(KeyedLoadField)
+ V(KeyedLoadField) \
+ V(StringLength) \
+ V(KeyedStringLength)
-// List of code stubs only used on ARM platforms.
+// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \
@@ -111,6 +111,19 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
+// List of code stubs only used on ARM 64 bits platforms.
+#if V8_TARGET_ARCH_ARM64
+#define CODE_STUB_LIST_ARM64(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(DirectCEntry) \
+ V(StoreRegistersState) \
+ V(RestoreRegistersState)
+#else
+#define CODE_STUB_LIST_ARM64(V)
+#endif
+
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
@@ -126,6 +139,7 @@ namespace internal {
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_MIPS(V)
// Stub is base classes of all stubs.
@@ -144,7 +158,9 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GetCode(Isolate* isolate);
// Retrieve the code for the stub, make and return a copy of the code.
- Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
+ Handle<Code> GetCodeCopy(
+ Isolate* isolate, const Code::FindAndReplacePattern& pattern);
+
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -188,9 +204,6 @@ class CodeStub BASE_EMBEDDED {
virtual Code::StubType GetStubType() {
return Code::NORMAL;
}
- virtual int GetStubFlags() {
- return -1;
- }
virtual void PrintName(StringStream* stream);
@@ -442,6 +455,8 @@ class RuntimeCallHelper {
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/code-stubs-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -487,6 +502,13 @@ class ToNumberStub: public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
+ static void InstallDescriptors(Isolate* isolate) {
+ ToNumberStub stub;
+ stub.InitializeInterfaceDescriptor(
+ isolate,
+ isolate->code_stub_interface_descriptor(CodeStub::ToNumber));
+ }
+
private:
Major MajorKey() { return ToNumber; }
int NotMissMinorKey() { return 0; }
@@ -516,8 +538,8 @@ class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
- explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
- : language_mode_(language_mode),
+ explicit FastNewClosureStub(StrictMode strict_mode, bool is_generator)
+ : strict_mode_(strict_mode),
is_generator_(is_generator) { }
virtual Handle<Code> GenerateCode(Isolate* isolate);
@@ -528,7 +550,7 @@ class FastNewClosureStub : public HydrogenCodeStub {
static void InstallDescriptors(Isolate* isolate);
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
bool is_generator() const { return is_generator_; }
private:
@@ -537,11 +559,11 @@ class FastNewClosureStub : public HydrogenCodeStub {
Major MajorKey() { return FastNewClosure; }
int NotMissMinorKey() {
- return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
+ return StrictModeBits::encode(strict_mode_ == STRICT) |
IsGeneratorBits::encode(is_generator_);
}
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
bool is_generator_;
};
@@ -625,6 +647,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
+ static void InstallDescriptors(Isolate* isolate);
+
private:
Mode mode_;
AllocationSiteMode allocation_site_mode_;
@@ -651,8 +675,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
- explicit FastCloneShallowObjectStub(int length)
- : length_(length) {
+ explicit FastCloneShallowObjectStub(int length) : length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
@@ -826,20 +849,9 @@ class FunctionPrototypeStub: public ICStub {
};
-class StringLengthStub: public ICStub {
- public:
- explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- STATIC_ASSERT(KindBits::kSize == 4);
- virtual CodeStub::Major MajorKey() { return StringLength; }
-};
-
-
class StoreICStub: public ICStub {
public:
- StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
+ StoreICStub(Code::Kind kind, StrictMode strict_mode)
: ICStub(kind), strict_mode_(strict_mode) { }
protected:
@@ -854,18 +866,7 @@ class StoreICStub: public ICStub {
return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
}
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreArrayLengthStub: public StoreICStub {
- public:
- explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
- : StoreICStub(kind, strict_mode) { }
- virtual void Generate(MacroAssembler* masm);
-
- private:
- virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
+ StrictMode strict_mode_;
};
@@ -883,7 +884,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
- virtual int GetStubFlags() { return kind(); }
+ virtual ExtraICState GetExtraICState() { return kind(); }
protected:
HandlerStub() : HICStub() { }
@@ -937,11 +938,10 @@ class LoadFieldStub: public HandlerStub {
bool inobject,
int index,
Representation representation) {
- bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
bit_field_ = KindBits::encode(kind)
| InobjectBits::encode(inobject)
| IndexBits::encode(index)
- | UnboxedDoubleBits::encode(unboxed_double);
+ | UnboxedDoubleBits::encode(representation.IsDouble());
}
private:
@@ -953,22 +953,69 @@ class LoadFieldStub: public HandlerStub {
};
+class StringLengthStub: public HandlerStub {
+ public:
+ explicit StringLengthStub() : HandlerStub() {
+ Initialize(Code::LOAD_IC);
+ }
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ protected:
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ void Initialize(Code::Kind kind) {
+ bit_field_ = KindBits::encode(kind);
+ }
+
+ private:
+ virtual CodeStub::Major MajorKey() { return StringLength; }
+};
+
+
+class KeyedStringLengthStub: public StringLengthStub {
+ public:
+ explicit KeyedStringLengthStub() : StringLengthStub() {
+ Initialize(Code::KEYED_LOAD_IC);
+ }
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedStringLength; }
+};
+
+
class StoreGlobalStub : public HandlerStub {
public:
- explicit StoreGlobalStub(bool is_constant) {
- bit_field_ = IsConstantBits::encode(is_constant);
+ explicit StoreGlobalStub(bool is_constant, bool check_global) {
+ bit_field_ = IsConstantBits::encode(is_constant) |
+ CheckGlobalBits::encode(check_global);
+ }
+
+ static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+ return isolate->factory()->uninitialized_value();
}
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
- Map* receiver_map,
- PropertyCell* cell) {
- Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- // Replace the placeholder cell and global object map with the actual global
- // cell and receiver map.
- Map* cell_map = isolate->heap()->global_property_cell_map();
- code->ReplaceNthObject(1, cell_map, cell);
- code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
- return code;
+ Handle<GlobalObject> global,
+ Handle<PropertyCell> cell) {
+ if (check_global()) {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(Handle<Map>(global_placeholder(isolate)->map()), global);
+ pattern.Add(isolate->factory()->meta_map(), Handle<Map>(global->map()));
+ pattern.Add(isolate->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(isolate, pattern);
+ } else {
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->global_property_cell_map(), cell);
+ return CodeStub::GetCodeCopy(isolate, pattern);
+ }
}
virtual Code::Kind kind() const { return Code::STORE_IC; }
@@ -979,11 +1026,12 @@ class StoreGlobalStub : public HandlerStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual ExtraICState GetExtraICState() { return bit_field_; }
-
- bool is_constant() {
+ bool is_constant() const {
return IsConstantBits::decode(bit_field_);
}
+ bool check_global() const {
+ return CheckGlobalBits::decode(bit_field_);
+ }
void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value);
}
@@ -996,13 +1044,11 @@ class StoreGlobalStub : public HandlerStub {
}
private:
- virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
-
- int bit_field_;
+ class CheckGlobalBits: public BitField<bool, 9, 1> {};
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
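
StoreGlobalStub above packs its flags with BitField<bool, 0, 1> and BitField<bool, 9, 1> specializations. Below is a minimal stand-in for that helper, just enough to show how encode/decode share one integer minor key; it is not the real V8 template.

#include <cstdint>
#include <cstdio>

// Minimal BitField<type, shift, size> stand-in: packs a value into `size`
// bits starting at `shift` of a 32-bit field, and extracts it again.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

using IsConstantBits = BitField<bool, 0, 1>;   // as declared above
using CheckGlobalBits = BitField<bool, 9, 1>;  // as declared above

int main() {
  uint32_t bits = IsConstantBits::encode(true) | CheckGlobalBits::encode(true);
  std::printf("is_constant=%d check_global=%d bits=0x%x\n",
              IsConstantBits::decode(bits), CheckGlobalBits::decode(bits), bits);
  return 0;
}
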
@@ -1010,13 +1056,14 @@ class StoreGlobalStub : public HandlerStub {
class CallApiFunctionStub : public PlatformCodeStub {
public:
- CallApiFunctionStub(bool restore_context,
+ CallApiFunctionStub(bool is_store,
bool call_data_undefined,
int argc) {
bit_field_ =
- RestoreContextBits::encode(restore_context) |
+ IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc);
+ ASSERT(!is_store || argc == 1);
}
private:
@@ -1024,7 +1071,7 @@ class CallApiFunctionStub : public PlatformCodeStub {
virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
- class RestoreContextBits: public BitField<bool, 0, 1> {};
+ class IsStoreBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
@@ -1058,8 +1105,6 @@ class KeyedLoadFieldStub: public LoadFieldStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual Handle<Code> GenerateCode(Isolate* isolate);
-
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
@@ -1155,10 +1200,9 @@ class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
Handle<AllocationSite> allocation_site) {
- Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
- // Replace the placeholder oddball with the actual allocation site.
- code->ReplaceNthObject(1, isolate->heap()->oddball_map(), *allocation_site);
- return code;
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->oddball_map(), allocation_site);
+ return CodeStub::GetCodeCopy(isolate, pattern);
}
virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
@@ -1368,7 +1412,7 @@ class CompareNilICStub : public HydrogenCodeStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- static void InitializeForIsolate(Isolate* isolate) {
+ static void InstallDescriptors(Isolate* isolate) {
CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
compare_stub.InitializeInterfaceDescriptor(
isolate,
@@ -1466,7 +1510,6 @@ class CEntryStub : public PlatformCodeStub {
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope);
@@ -1520,8 +1563,8 @@ class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
- NEW_NON_STRICT_FAST,
- NEW_NON_STRICT_SLOW,
+ NEW_SLOPPY_FAST,
+ NEW_SLOPPY_SLOW,
NEW_STRICT
};
@@ -1536,8 +1579,8 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
- void GenerateNewNonStrictFast(MacroAssembler* masm);
- void GenerateNewNonStrictSlow(MacroAssembler* masm);
+ void GenerateNewSloppyFast(MacroAssembler* masm);
+ void GenerateNewSloppySlow(MacroAssembler* masm);
virtual void PrintName(StringStream* stream);
};
@@ -1866,23 +1909,21 @@ class DoubleToIStub : public PlatformCodeStub {
int offset,
bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) {
- bit_field_ = SourceRegisterBits::encode(source.code_) |
- DestinationRegisterBits::encode(destination.code_) |
+ bit_field_ = SourceRegisterBits::encode(source.code()) |
+ DestinationRegisterBits::encode(destination.code()) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
- Register result = { SourceRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(SourceRegisterBits::decode(bit_field_));
}
Register destination() {
- Register result = { DestinationRegisterBits::decode(bit_field_) };
- return result;
+ return Register::from_code(DestinationRegisterBits::decode(bit_field_));
}
bool is_truncating() {
@@ -2334,7 +2375,7 @@ class ToBooleanStub: public HydrogenCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
- static void InitializeForIsolate(Isolate* isolate) {
+ static void InstallDescriptors(Isolate* isolate) {
ToBooleanStub stub;
stub.InitializeInterfaceDescriptor(
isolate,
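
Note on the code-stubs.h hunks above: the per-object Code::ReplaceNthObject calls are superseded by a declarative Code::FindAndReplacePattern handed to CodeStub::GetCodeCopy. A minimal sketch of the new idiom, using a hypothetical MyHandlerStub; the pattern/copy calls mirror StoreGlobalStub::GetCodeCopyFromTemplate above.

    // Sketch only: MyHandlerStub does not exist in this patch.
    Handle<Code> MyHandlerStub::GetCodeCopyFromTemplate(
        Isolate* isolate, Handle<PropertyCell> cell) {
      Code::FindAndReplacePattern pattern;
      // Each Add() pairs the map of a placeholder object embedded in the
      // template code with the object that should replace it in the copy.
      pattern.Add(isolate->factory()->global_property_cell_map(), cell);
      return CodeStub::GetCodeCopy(isolate, pattern);
    }
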
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 13ce2218d..ea0ead310 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -165,6 +165,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
function->debug_name()->ToCString().get(), tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- Optimized code ---\n");
+ PrintF(tracing_scope.file(),
+ "optimization_id = %d\n", info->optimization_id());
} else {
PrintF(tracing_scope.file(), "--- Code ---\n");
}
@@ -220,11 +222,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
case READ_ELEMENT:
GenerateReadElement(masm);
break;
- case NEW_NON_STRICT_FAST:
- GenerateNewNonStrictFast(masm);
+ case NEW_SLOPPY_FAST:
+ GenerateNewSloppyFast(masm);
break;
- case NEW_NON_STRICT_SLOW:
- GenerateNewNonStrictSlow(masm);
+ case NEW_SLOPPY_SLOW:
+ GenerateNewSloppySlow(masm);
break;
case NEW_STRICT:
GenerateNewStrict(masm);
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 8bd430266..6b5f9513e 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -72,6 +72,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 1f7aef4f0..9054187a1 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -33,8 +33,6 @@
var $Set = global.Set;
var $Map = global.Map;
-var $WeakMap = global.WeakMap;
-var $WeakSet = global.WeakSet;
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
@@ -230,174 +228,3 @@ function SetUpMap() {
}
SetUpMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakMap
-
-function WeakMapConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakMap']);
- }
-}
-
-
-function WeakMapGet(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.get', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.set', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.has', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionHas(this, key);
-}
-
-
-function WeakMapDelete(key) {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
- return %WeakCollectionDelete(this, key);
-}
-
-
-function WeakMapClear() {
- if (!IS_WEAKMAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakMap.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakMap() {
- %CheckIsBootstrapping();
-
- %SetCode($WeakMap, WeakMapConstructor);
- %FunctionSetPrototype($WeakMap, new $Object());
- %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakMap prototype object.
- InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
- "get", WeakMapGet,
- "set", WeakMapSet,
- "has", WeakMapHas,
- "delete", WeakMapDelete,
- "clear", WeakMapClear
- ));
-}
-
-SetUpWeakMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakSet
-
-function WeakSetConstructor() {
- if (%_IsConstructCall()) {
- %WeakCollectionInitialize(this);
- } else {
- throw MakeTypeError('constructor_not_function', ['WeakSet']);
- }
-}
-
-
-function WeakSetAdd(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.add', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionSet(this, value, true);
-}
-
-
-function WeakSetHas(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.has', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionHas(this, value);
-}
-
-
-function WeakSetDelete(value) {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.delete', this]);
- }
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
- return %WeakCollectionDelete(this, value);
-}
-
-
-function WeakSetClear() {
- if (!IS_WEAKSET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['WeakSet.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakSet() {
- %CheckIsBootstrapping();
-
- %SetCode($WeakSet, WeakSetConstructor);
- %FunctionSetPrototype($WeakSet, new $Object());
- %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
-
- // Set up the non-enumerable functions on the WeakSet prototype object.
- InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
- "add", WeakSetAdd,
- "has", WeakSetHas,
- "delete", WeakSetDelete,
- "clear", WeakSetClear
- ));
-}
-
-SetUpWeakSet();
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index a69ef4c76..54d4565e2 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -269,7 +269,7 @@ void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
@@ -280,7 +280,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(
- *source, *context, language_mode, scope_position);
+ *source, *context, strict_mode, scope_position);
if (result->IsSharedFunctionInfo()) {
break;
}
@@ -421,7 +421,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
@@ -430,11 +430,11 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<SharedFunctionInfo> result;
if (context->IsNativeContext()) {
result = eval_global_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
} else {
ASSERT(scope_position != RelocInfo::kNoPosition);
result = eval_contextual_.Lookup(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
}
return result;
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index ead52b5fa..b31de3111 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -136,10 +136,9 @@ class CompilationCacheScript : public CompilationSubCache {
// entries:
// 1. The source string.
// 2. The shared function info of the calling function.
-// 3. Whether the source should be compiled as strict code or as non-strict
-// code.
+// 3. Whether the source should be compiled as strict code or as sloppy code.
// Note: Currently there are clients of CompileEval that always compile
-// non-strict code even if the calling function is a strict mode function.
+// sloppy code even if the calling function is a strict mode function.
// More specifically these are the CompileString, DebugEvaluate and
// DebugEvaluateGlobal runtime functions.
// 4. The start position of the calling scope.
@@ -150,7 +149,7 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
void Put(Handle<String> source,
@@ -222,7 +221,7 @@ class CompilationCache {
// contain a script for the given source string.
Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
// Returns the regexp data associated with the given regexp if it
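
The compilation-cache changes above replace LanguageMode with the two-valued StrictMode in the eval cache key. A hedged sketch of a lookup/put round trip, using only the signatures visible in this diff (isolate, source, context and scope_position are assumed to be set up by the caller):

    CompilationCache* cache = isolate->compilation_cache();
    Handle<SharedFunctionInfo> shared =
        cache->LookupEval(source, context, STRICT, scope_position);
    if (shared.is_null()) {
      // Compile the eval source here, then publish it for later lookups:
      // cache->PutEval(source, context, shared, scope_position);
    }
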
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index b9e13c166..4b539897b 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -56,37 +56,40 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE)),
+ : flags_(StrictModeField::encode(SLOPPY)),
script_(script),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -94,11 +97,11 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Isolate* isolate,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
+ : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
- this_has_uses_(true) {
+ this_has_uses_(true),
+ optimization_id_(-1) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -112,7 +115,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
scope_ = NULL;
global_scope_ = NULL;
extension_ = NULL;
- pre_parse_data_ = NULL;
+ cached_data_ = NULL;
+ cached_data_mode_ = NO_CACHED_DATA;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
@@ -133,8 +137,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
MarkAsNative();
}
if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
+ ASSERT(strict_mode() == SLOPPY);
+ SetStrictMode(shared_info_->strict_mode());
}
set_bailout_reason(kUnknown);
}
@@ -211,8 +215,7 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- code_stub()->GetStubType(),
- code_stub()->GetStubFlags());
+ code_stub()->GetStubType());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -225,7 +228,7 @@ void CompilationInfo::DisableOptimization() {
FLAG_optimize_closures &&
closure_.is_null() &&
!scope_->HasTrivialOuterContext() &&
- !scope_->outer_scope_calls_non_strict_eval() &&
+ !scope_->outer_scope_calls_sloppy_eval() &&
!scope_->inside_with();
SetMode(is_optimizable_closure ? BASE : NONOPT);
}
@@ -243,6 +246,13 @@ bool CompilationInfo::ShouldSelfOptimize() {
}
+void CompilationInfo::PrepareForCompilation(Scope* scope) {
+ ASSERT(scope_ == NULL);
+ scope_ = scope;
+ function()->ProcessFeedbackSlots(isolate_);
+}
+
+
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -363,7 +373,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info()->function());
- unoptimized.SetScope(info()->scope());
+ unoptimized.PrepareForCompilation(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@@ -398,7 +408,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = FLAG_emit_opt_code_positions
+ graph_builder_ = FLAG_hydrogen_track_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
@@ -571,7 +581,7 @@ static void UpdateSharedFunctionInfo(CompilationInfo* info) {
shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- shared->set_language_mode(lit->language_mode());
+ shared->set_strict_mode(lit->strict_mode());
}
@@ -596,7 +606,7 @@ static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
- function_info->set_language_mode(lit->language_mode());
+ function_info->set_strict_mode(lit->strict_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
@@ -627,8 +637,7 @@ static Handle<Code> GetUnoptimizedCodeCommon(CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
if (!Parser::Parse(info)) return Handle<Code>::null();
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
+ info->SetStrictMode(info->function()->strict_mode());
if (!CompileUnoptimizedCode(info)) return Handle<Code>::null();
Compiler::RecordFunctionCompilation(
@@ -736,8 +745,7 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
info.MarkAsGlobal();
if (!Parser::Parse(&info)) return;
- LanguageMode language_mode = info.function()->language_mode();
- info.SetLanguageMode(language_mode);
+ info.SetStrictMode(info.function()->strict_mode());
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
@@ -775,10 +783,20 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
ASSERT(info->is_eval() || info->is_global());
bool parse_allow_lazy =
- (info->pre_parse_data() != NULL ||
+ (info->cached_data_mode() == CONSUME_CACHED_DATA ||
String::cast(script->source())->length() > FLAG_min_preparse_length) &&
!DebuggerWantsEagerCompilation(info);
+ if (!parse_allow_lazy && info->cached_data_mode() != NO_CACHED_DATA) {
+ // We are going to parse eagerly, but we either 1) have cached data produced
+ // by lazy parsing or 2) are asked to generate cached data. We cannot use
+ // the existing data, since it won't contain all the symbols we need for
+ // eager parsing. In addition, it doesn't make sense to produce the data
+ // when parsing eagerly. That data would contain all symbols, but no
+ // functions, so it cannot be used to aid lazy parsing later.
+ info->SetCachedData(NULL, NO_CACHED_DATA);
+ }
+
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
@@ -846,7 +864,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
ParseRestriction restriction,
int scope_position) {
Isolate* isolate = source->GetIsolate();
@@ -856,14 +874,14 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
CompilationCache* compilation_cache = isolate->compilation_cache();
Handle<SharedFunctionInfo> shared_info = compilation_cache->LookupEval(
- source, context, language_mode, scope_position);
+ source, context, strict_mode, scope_position);
if (shared_info.is_null()) {
Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfoWithZone info(script);
info.MarkAsEval();
if (context->IsNativeContext()) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
+ info.SetStrictMode(strict_mode);
info.SetParseRestriction(restriction);
info.SetContext(context);
@@ -880,14 +898,8 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
// to handle eval-code in the optimizing compiler.
shared_info->DisableOptimization(kEval);
- // If caller is strict mode, the result must be in strict mode or
- // extended mode as well, but not the other way around. Consider:
- // eval("'use strict'; ...");
- ASSERT(language_mode != STRICT_MODE || !shared_info->is_classic_mode());
- // If caller is in extended mode, the result must also be in
- // extended mode.
- ASSERT(language_mode != EXTENDED_MODE ||
- shared_info->is_extended_mode());
+ // If caller is strict mode, the result must be in strict mode as well.
+ ASSERT(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
if (!shared_info->dont_cache()) {
compilation_cache->PutEval(
source, context, shared_info, scope_position);
@@ -902,16 +914,25 @@ Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
}
-Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives) {
+Handle<SharedFunctionInfo> Compiler::CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag natives) {
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data = NULL;
+ } else if (cached_data_mode == PRODUCE_CACHED_DATA) {
+ ASSERT(cached_data && !*cached_data);
+ } else {
+ ASSERT(cached_data_mode == CONSUME_CACHED_DATA);
+ ASSERT(cached_data && *cached_data);
+ }
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
@@ -952,18 +973,13 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
- : *script_data);
-
// Compile the function and add it to the cache.
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
- info.SetPreParseData(pre_data);
+ info.SetCachedData(cached_data, cached_data_mode);
info.SetContext(context);
- if (FLAG_use_strict) {
- info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
- }
+ if (FLAG_use_strict) info.SetStrictMode(STRICT);
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
@@ -982,8 +998,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfoWithZone info(script);
info.SetFunction(literal);
- info.SetScope(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
+ info.PrepareForCompilation(literal->scope());
+ info.SetStrictMode(literal->scope()->strict_mode());
Isolate* isolate = info.isolate();
Factory* factory = isolate->factory();
@@ -1078,8 +1094,7 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool CompileOptimizedPrologue(CompilationInfo* info) {
if (!Parser::Parse(info)) return false;
- LanguageMode language_mode = info->function()->language_mode();
- info->SetLanguageMode(language_mode);
+ info->SetStrictMode(info->function()->strict_mode());
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
@@ -1178,7 +1193,7 @@ Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
if (FLAG_trace_opt) {
PrintF("[failed to optimize ");
function->PrintName();
- PrintF("]\n");
+ PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
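
Compiler::CompileScript now takes a ScriptDataImpl** plus a CachedDataMode in place of the old pre-parse data pointer; the ASSERTs at the top of the new definition spell out the caller contract. A sketch of the three call shapes (argument values are illustrative, and the NOT_NATIVES_CODE flag is assumed here):

    ScriptDataImpl* data = NULL;

    // PRODUCE_CACHED_DATA: *cached_data must start out NULL; the parser
    // fills it in for reuse by a later compile.
    Compiler::CompileScript(source, name, 0, 0, false, context, NULL,
                            &data, PRODUCE_CACHED_DATA, NOT_NATIVES_CODE);

    // CONSUME_CACHED_DATA: *cached_data must already hold produced data.
    Compiler::CompileScript(source, name, 0, 0, false, context, NULL,
                            &data, CONSUME_CACHED_DATA, NOT_NATIVES_CODE);

    // NO_CACHED_DATA: the cached_data argument is ignored entirely.
    Compiler::CompileScript(source, name, 0, 0, false, context, NULL,
                            NULL, NO_CACHED_DATA, NOT_NATIVES_CODE);

Note that when the source is short enough to be parsed eagerly, CompileToplevel silently downgrades the request to NO_CACHED_DATA, as the comment in that hunk explains.
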
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 3bf4db578..380201688 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -45,6 +45,12 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+enum CachedDataMode {
+ NO_CACHED_DATA,
+ CONSUME_CACHED_DATA,
+ PRODUCE_CACHED_DATA
+};
+
struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
int from;
@@ -66,11 +72,7 @@ class CompilationInfo {
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
- bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
- bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(flags_);
- }
+ StrictMode strict_mode() const { return StrictModeField::decode(flags_); }
bool is_in_loop() const { return IsInLoop::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@@ -81,7 +83,10 @@ class CompilationInfo {
Handle<Script> script() const { return script_; }
HydrogenCodeStub* code_stub() const {return code_stub_; }
v8::Extension* extension() const { return extension_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
+ ScriptDataImpl** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const {
+ return cached_data_mode_;
+ }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
@@ -109,11 +114,9 @@ class CompilationInfo {
bool this_has_uses() {
return this_has_uses_;
}
- void SetLanguageMode(LanguageMode language_mode) {
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
- flags_ = LanguageModeField::update(flags_, language_mode);
+ void SetStrictMode(StrictMode strict_mode) {
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+ flags_ = StrictModeField::update(flags_, strict_mode);
}
void MarkAsInLoop() {
ASSERT(is_lazy());
@@ -175,10 +178,8 @@ class CompilationInfo {
ASSERT(function_ == NULL);
function_ = literal;
}
- void SetScope(Scope* scope) {
- ASSERT(scope_ == NULL);
- scope_ = scope;
- }
+ // When the scope is applied, we may have deferred work to do on the function.
+ void PrepareForCompilation(Scope* scope);
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
@@ -188,9 +189,15 @@ class CompilationInfo {
ASSERT(!is_lazy());
extension_ = extension;
}
- void SetPreParseData(ScriptDataImpl* pre_parse_data) {
- ASSERT(!is_lazy());
- pre_parse_data_ = pre_parse_data;
+ void SetCachedData(ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(!is_lazy());
+ cached_data_ = cached_data;
+ }
}
void SetContext(Handle<Context> context) {
context_ = context;
@@ -229,6 +236,7 @@ class CompilationInfo {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
+ optimization_id_ = isolate()->NextOptimizationId();
}
void DisableOptimization();
@@ -317,6 +325,8 @@ class CompilationInfo {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
}
+ int optimization_id() const { return optimization_id_; }
+
protected:
CompilationInfo(Handle<Script> script,
Zone* zone);
@@ -359,26 +369,26 @@ class CompilationInfo {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
// Is this a function from our natives.
- class IsNative: public BitField<bool, 6, 1> {};
+ class IsNative: public BitField<bool, 5, 1> {};
// Is this code being compiled with support for deoptimization..
- class SupportsDeoptimization: public BitField<bool, 7, 1> {};
+ class SupportsDeoptimization: public BitField<bool, 6, 1> {};
// If compiling for debugging produce just full code matching the
// initial mode setting.
- class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+ class IsCompilingForDebugging: public BitField<bool, 7, 1> {};
// If the compiled code contains calls that require building a frame
- class IsCalling: public BitField<bool, 9, 1> {};
+ class IsCalling: public BitField<bool, 8, 1> {};
// If the compiled code contains calls that require building a frame
- class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ class IsDeferredCalling: public BitField<bool, 9, 1> {};
// If the compiled code contains calls that require building a frame
- class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+ class IsNonDeferredCalling: public BitField<bool, 10, 1> {};
// If the compiled code saves double caller registers that it clobbers.
- class SavesCallerDoubles: public BitField<bool, 12, 1> {};
+ class SavesCallerDoubles: public BitField<bool, 11, 1> {};
// If the set of valid statements is restricted.
- class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {};
+ class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {};
// If the function requires a frame (for unspecified reasons)
- class RequiresFrame: public BitField<bool, 14, 1> {};
+ class RequiresFrame: public BitField<bool, 13, 1> {};
unsigned flags_;
@@ -402,7 +412,8 @@ class CompilationInfo {
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
+ ScriptDataImpl** cached_data_;
+ CachedDataMode cached_data_mode_;
// The context of the caller for eval code, and the global context for a
// global script. Will be a null handle otherwise.
@@ -452,6 +463,8 @@ class CompilationInfo {
Handle<Foreign> object_wrapper_;
+ int optimization_id_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -615,21 +628,22 @@ class Compiler : public AllStatic {
// Compile a String source within a context for eval.
static Handle<JSFunction> GetFunctionFromEval(Handle<String> source,
Handle<Context> context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
ParseRestriction restriction,
int scope_position);
// Compile a String source within a context.
- static Handle<SharedFunctionInfo> CompileScript(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- bool is_shared_cross_origin,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code);
+ static Handle<SharedFunctionInfo> CompileScript(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ bool is_shared_cross_origin,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl** cached_data,
+ CachedDataMode cached_data_mode,
+ NativesFlag is_natives_code);
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 710d30aa8..33d47e9c4 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -131,9 +131,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
// to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
- *attributes = object->GetLocalPropertyAttribute(*name);
+ *attributes = JSReceiver::GetLocalPropertyAttribute(object, name);
} else {
- *attributes = object->GetPropertyAttribute(*name);
+ *attributes = JSReceiver::GetPropertyAttribute(object, name);
}
if (isolate->has_pending_exception()) return Handle<Object>();
@@ -185,12 +185,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
*binding_flags = (init_flag == kNeedsInitialization)
? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
break;
- case CONST:
+ case CONST_LEGACY:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
break;
- case CONST_HARMONY:
+ case CONST:
*attributes = READ_ONLY;
*binding_flags = (init_flag == kNeedsInitialization)
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
@@ -222,8 +222,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
*index = function_index;
*attributes = READ_ONLY;
- ASSERT(mode == CONST || mode == CONST_HARMONY);
- *binding_flags = (mode == CONST)
+ ASSERT(mode == CONST_LEGACY || mode == CONST);
+ *binding_flags = (mode == CONST_LEGACY)
? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
return context;
}
@@ -368,7 +368,7 @@ Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Handle<Object> result(error_message_for_code_gen_from_strings(),
GetIsolate());
if (!result->IsUndefined()) return result;
- return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
+ return GetIsolate()->factory()->NewStringFromOneByte(STATIC_ASCII_VECTOR(
"Code generation from strings disallowed for this context"));
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index bd6c6a2bb..6ba9b3ed7 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -135,17 +135,19 @@ enum BindingFlags {
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
- V(FUNCTION_MAP_INDEX, Map, function_map) \
- V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
- V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
- V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- strict_mode_function_without_prototype_map) \
+ V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
+ V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ strict_function_without_prototype_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
- V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ sloppy_arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
aliased_arguments_boilerplate) \
- V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
- strict_mode_arguments_boilerplate) \
+ V(STRICT_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ strict_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -160,13 +162,19 @@ enum BindingFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
+ V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -179,9 +187,8 @@ enum BindingFlags {
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
- V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
- V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
- strict_mode_generator_function_map) \
+ V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
+ V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map)
@@ -225,8 +232,11 @@ enum BindingFlags {
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
-// Finally, the native context contains additional slots for fast access to
-// native properties.
+// The native context contains additional slots for fast access to native
+// properties.
+//
+// Finally, with Harmony scoping, the JSFunction representing a top level
+// script will have the GlobalContext rather than a FunctionContext.
class Context: public FixedArray {
public:
@@ -255,14 +265,14 @@ class Context: public FixedArray {
// These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
- ARGUMENTS_BOILERPLATE_INDEX,
+ SLOPPY_ARGUMENTS_BOILERPLATE_INDEX,
ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
- STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
+ STRICT_ARGUMENTS_BOILERPLATE_INDEX,
REGEXP_RESULT_MAP_INDEX,
- FUNCTION_MAP_INDEX,
- STRICT_MODE_FUNCTION_MAP_INDEX,
- FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ SLOPPY_FUNCTION_MAP_INDEX,
+ STRICT_FUNCTION_MAP_INDEX,
+ SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
@@ -318,6 +328,13 @@ class Context: public FixedArray {
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
+ ENQUEUE_EXTERNAL_MICROTASK_INDEX,
+ IS_PROMISE_INDEX,
+ PROMISE_CREATE_INDEX,
+ PROMISE_RESOLVE_INDEX,
+ PROMISE_REJECT_INDEX,
+ PROMISE_CHAIN_INDEX,
+ PROMISE_CATCH_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -327,8 +344,8 @@ class Context: public FixedArray {
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
- GENERATOR_FUNCTION_MAP_INDEX,
- STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
+ SLOPPY_GENERATOR_FUNCTION_MAP_INDEX,
+ STRICT_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
GENERATOR_RESULT_MAP_INDEX,
@@ -422,12 +439,6 @@ class Context: public FixedArray {
return map == map->GetHeap()->global_context_map();
}
- // Tells whether the native context is marked with out of memory.
- inline bool has_out_of_memory();
-
- // Mark the native context with out of memory.
- inline void mark_out_of_memory();
-
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -488,14 +499,14 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
- static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
+ static int FunctionMapIndex(StrictMode strict_mode, bool is_generator) {
return is_generator
- ? (language_mode == CLASSIC_MODE
- ? GENERATOR_FUNCTION_MAP_INDEX
- : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
- : (language_mode == CLASSIC_MODE
- ? FUNCTION_MAP_INDEX
- : STRICT_MODE_FUNCTION_MAP_INDEX);
+ ? (strict_mode == SLOPPY
+ ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_GENERATOR_FUNCTION_MAP_INDEX)
+ : (strict_mode == SLOPPY
+ ? SLOPPY_FUNCTION_MAP_INDEX
+ : STRICT_FUNCTION_MAP_INDEX);
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
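
Context::FunctionMapIndex above now keys off StrictMode and is_generator directly. A small usage sketch following that switch; native_context is assumed to be a Handle<Context> for the current native context:

    // Sloppy-mode generator functions get the sloppy generator map.
    int index = Context::FunctionMapIndex(SLOPPY, true /* is_generator */);
    // index == Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
    Object* map = native_context->get(index);
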
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 3cb7ef299..e503eb502 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -128,7 +128,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
+ if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
++*current;
}
return false;
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index e0a6a60a0..e7fab1c3d 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -62,9 +62,7 @@ void HistogramTimer::Start() {
if (Enabled()) {
timer_.Start();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::START, name()));
- }
+ isolate()->event_logger()(name(), Logger::START);
}
@@ -75,9 +73,7 @@ void HistogramTimer::Stop() {
AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
timer_.Stop();
}
- if (FLAG_log_internal_timer_events) {
- LOG(isolate(), TimerEvent(Logger::END, name()));
- }
+ isolate()->event_logger()(name(), Logger::END);
}
} } // namespace v8::internal
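
HistogramTimer no longer checks FLAG_log_internal_timer_events itself; Start() and Stop() now always forward to the isolate's event logger callback. A hypothetical sink is sketched below; the exact callback typedef is an assumption, only the call shape event_logger()(name, Logger::START/END) comes from this hunk:

    // Hypothetical logger: receives the timer name and a Logger::START/END tag.
    static void MyEventLogger(const char* name, int start_end) {
      PrintF("timer-event %s %s\n", name,
             start_end == Logger::START ? "start" : "end");
    }
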
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
index 2c909fa76..7eb2016bd 100644
--- a/deps/v8/src/d8-debug.cc
+++ b/deps/v8/src/d8-debug.cc
@@ -217,6 +217,8 @@ void RemoteDebugger::Run() {
delete event;
}
+ delete conn_;
+ conn_ = NULL;
// Wait for the receiver thread to end.
receiver.Join();
}
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
index f75317726..2d4f5e150 100644
--- a/deps/v8/src/d8-debug.h
+++ b/deps/v8/src/d8-debug.h
@@ -31,6 +31,7 @@
#include "d8.h"
#include "debug.h"
+#include "platform/socket.h"
namespace v8 {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 76ff4f943..7ac0c6546 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -119,6 +119,8 @@ class PerIsolateData {
Persistent<Context>* realms_;
Persistent<Value> realm_shared_;
+ int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset);
int RealmFind(Handle<Context> context);
};
@@ -203,7 +205,10 @@ bool Shell::ExecuteString(Isolate* isolate,
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
}
- Handle<Script> script = Script::New(source, name);
+ ScriptOrigin origin(name);
+ ScriptCompiler::Source script_source(source, origin);
+ Handle<UnboundScript> script =
+ ScriptCompiler::CompileUnbound(isolate, &script_source);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions && !FLAG_debugger)
@@ -214,7 +219,7 @@ bool Shell::ExecuteString(Isolate* isolate,
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
data->realm_current_ = data->realm_switch_;
if (result.IsEmpty()) {
@@ -288,6 +293,24 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
+int PerIsolateData::RealmIndexOrThrow(
+ const v8::FunctionCallbackInfo<v8::Value>& args,
+ int arg_offset) {
+ if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
+ Throw(args.GetIsolate(), "Invalid argument");
+ return -1;
+ }
+ int index = args[arg_offset]->Int32Value();
+ if (index < 0 ||
+ index >= realm_count_ ||
+ realms_[index].IsEmpty()) {
+ Throw(args.GetIsolate(), "Invalid realm index");
+ return -1;
+ }
+ return index;
+}
+
+
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -325,15 +348,8 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
// (Note that properties of global objects cannot be read/written cross-realm.)
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
args.GetReturnValue().Set(
Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
@@ -361,13 +377,9 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
- index == 0 ||
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
@@ -380,15 +392,8 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
- return;
- }
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
data->realm_switch_ = index;
}
@@ -397,20 +402,19 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+ if (args.Length() < 2 || !args[1]->IsString()) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = args[0]->Uint32Value();
- if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
- return;
- }
- Handle<Script> script = Script::New(args[1]->ToString());
+ ScriptCompiler::Source script_source(args[1]->ToString());
+ Handle<UnboundScript> script = ScriptCompiler::CompileUnbound(
+ isolate, &script_source);
if (script.IsEmpty()) return;
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
- Handle<Value> result = script->Run();
+ Handle<Value> result = script->BindToCurrentContext()->Run();
realm->Exit();
args.GetReturnValue().Set(result);
}
@@ -807,7 +811,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
Handle<String> name =
String::NewFromUtf8(isolate, shell_source_name.start(),
String::kNormalString, shell_source_name.length());
- Handle<Script> script = Script::Compile(source, name);
+ ScriptOrigin origin(name);
+ Handle<Script> script = Script::Compile(source, &origin);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
@@ -1435,6 +1440,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
+ } else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
+ options.icu_data_file = argv[i] + 16;
+ argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1669,7 +1677,7 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
- v8::V8::InitializeICU();
+ v8::V8::InitializeICU(options.icu_data_file);
#ifndef V8_SHARED
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
i::FLAG_redirect_code_traces_to = "code.asm";
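
d8 now compiles through ScriptCompiler, so one unbound script can be bound and run in whichever realm is current at execution time. The flow, condensed from the ExecuteString and RealmEval hunks above (source, name and realm are assumed to be set up as in those hunks):

    ScriptOrigin origin(name);
    ScriptCompiler::Source script_source(source, origin);
    Handle<UnboundScript> script =
        ScriptCompiler::CompileUnbound(isolate, &script_source);
    if (!script.IsEmpty()) {
      realm->Enter();
      // Bind to the context that is entered right now, then run.
      Handle<Value> result = script->BindToCurrentContext()->Run();
      realm->Exit();
    }
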
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index db2edb93c..3edd8a730 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -233,7 +233,8 @@ class ShellOptions {
expected_to_throw(false),
mock_arraybuffer_allocator(false),
num_isolates(1),
- isolate_sources(NULL) { }
+ isolate_sources(NULL),
+ icu_data_file(NULL) { }
~ShellOptions() {
#ifndef V8_SHARED
@@ -258,6 +259,7 @@ class ShellOptions {
bool mock_arraybuffer_allocator;
int num_isolates;
SourceGroup* isolate_sources;
+ const char* icu_data_file;
};
#ifdef V8_SHARED
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 4afd8dc60..70d6be989 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -62,6 +62,7 @@ void DateCache::ResetDateCache() {
after_ = &dst_[1];
local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
+ OS::ClearTimezoneCache(tz_cache_);
}
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index fcd61db04..e9c9d9cb0 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -62,11 +62,14 @@ class DateCache {
// It is an invariant of DateCache that cache stamp is non-negative.
static const int kInvalidStamp = -1;
- DateCache() : stamp_(0) {
+ DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) {
ResetDateCache();
}
- virtual ~DateCache() {}
+ virtual ~DateCache() {
+ OS::DisposeTimezoneCache(tz_cache_);
+ tz_cache_ = NULL;
+ }
// Clears cached timezone information and increments the cache stamp.
@@ -113,7 +116,7 @@ class DateCache {
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
}
- return OS::LocalTimezone(static_cast<double>(time_ms));
+ return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
}
// ECMA 262 - 15.9.5.26
@@ -182,11 +185,11 @@ class DateCache {
// These functions are virtual so that we can override them when testing.
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
double time_ms = static_cast<double>(time_sec * 1000);
- return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
+ return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_));
}
virtual int GetLocalOffsetFromOS() {
- double offset = OS::LocalTimeOffset();
+ double offset = OS::LocalTimeOffset(tz_cache_);
ASSERT(offset < kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
@@ -253,6 +256,8 @@ class DateCache {
int ymd_year_;
int ymd_month_;
int ymd_day_;
+
+ TimezoneCache* tz_cache_;
};
} } // namespace v8::internal
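
Timezone queries on OS now take an explicit TimezoneCache, owned here by DateCache. A condensed sketch of the lifecycle as wired up in this hunk (a double time_ms is assumed; return types follow the existing DateCache callers):

    TimezoneCache* tz = OS::CreateTimezoneCache();      // in the constructor
    double offset = OS::LocalTimeOffset(tz);             // GetLocalOffsetFromOS
    int dst = static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz));
    const char* zone = OS::LocalTimezone(time_ms, tz);   // TimezoneName
    OS::ClearTimezoneCache(tz);                          // ResetDateCache
    OS::DisposeTimezoneCache(tz);                        // in the destructor
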
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index f3d4af244..b7ecbeb39 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -46,6 +46,7 @@ var timezone_cache_timezone;
function LocalTimezone(t) {
if (NUMBER_IS_NAN(t)) return "";
+ CheckDateCacheCurrent();
if (t == timezone_cache_time) {
return timezone_cache_timezone;
}
@@ -156,6 +157,7 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
// given time, we re-use that instead of parsing the string again.
+ CheckDateCacheCurrent();
var cache = Date_cache;
if (cache.string === year) {
value = cache.time;
@@ -743,15 +745,26 @@ function DateToJSON(key) {
}
-function ResetDateCache() {
+var date_cache_version_holder;
+var date_cache_version = NAN;
+
+
+function CheckDateCacheCurrent() {
+ if (!date_cache_version_holder) {
+ date_cache_version_holder = %DateCacheVersion();
+ }
+ if (date_cache_version_holder[0] == date_cache_version) {
+ return;
+ }
+ date_cache_version = date_cache_version_holder[0];
+
// Reset the timezone cache:
timezone_cache_time = NAN;
- timezone_cache_timezone = undefined;
+ timezone_cache_timezone = UNDEFINED;
// Reset the date cache:
- cache = Date_cache;
- cache.time = NAN;
- cache.string = null;
+ Date_cache.time = NAN;
+ Date_cache.string = null;
}
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 27584ce39..7dc489de3 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -122,7 +122,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpace(ch_)) {
+ if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
Next();
return true;
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index d474e2059..d7667f19c 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -754,6 +754,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> script_name = factory->NewStringFromAscii(name);
+ ASSERT(!script_name.is_null());
Handle<Context> context = isolate->native_context();
// Compile the script.
@@ -762,8 +763,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
script_name, 0, 0,
false,
context,
- NULL, NULL,
- Handle<String>::null(),
+ NULL, NULL, NO_CACHED_DATA,
NATIVES_CODE);
// Silently ignore stack overflows during compilation.
@@ -792,7 +792,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
- Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
+ Vector<Handle<Object> >::empty(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
@@ -853,7 +853,7 @@ bool Debug::Load() {
key,
Handle<Object>(global->builtins(), isolate_),
NONE,
- kNonStrictMode),
+ SLOPPY),
false);
// Compile the JavaScript for the debugger in the debugger context.
@@ -1900,30 +1900,34 @@ static void RedirectActivationsToRecompiledCodeOnThread(
}
// Iterate over the RelocInfo in the original code to compute the sum of the
- // constant pools sizes. (See Assembler::CheckConstPool())
- // Note that this is only useful for architectures using constant pools.
- int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
- int frame_const_pool_size = 0;
- for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
+ // constant pools and veneer pools sizes. (See Assembler::CheckConstPool()
+ // and Assembler::CheckVeneerPool())
+ // Note that this is only useful for architectures using constant pools or
+ // veneer pools.
+ int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ int frame_pool_size = 0;
+ for (RelocIterator it(*frame_code, pool_mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() >= frame->pc()) break;
- frame_const_pool_size += static_cast<int>(info->data());
+ frame_pool_size += static_cast<int>(info->data());
}
intptr_t frame_offset =
- frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
+ frame->pc() - frame_code->instruction_start() - frame_pool_size;
// Iterate over the RelocInfo for new code to find the number of bytes
// generated for debug slots and constant pools.
int debug_break_slot_bytes = 0;
- int new_code_const_pool_size = 0;
+ int new_code_pool_size = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::CONST_POOL);
+ RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
intptr_t new_offset = info->pc() - new_code->instruction_start() -
- new_code_const_pool_size - debug_break_slot_bytes;
+ new_code_pool_size - debug_break_slot_bytes;
if (new_offset >= frame_offset) {
break;
}
@@ -1932,14 +1936,14 @@ static void RedirectActivationsToRecompiledCodeOnThread(
debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
} else {
ASSERT(RelocInfo::IsConstPool(info->rmode()));
- // The size of the constant pool is encoded in the data.
- new_code_const_pool_size += static_cast<int>(info->data());
+ // The size of the pools is encoded in the data.
+ new_code_pool_size += static_cast<int>(info->data());
}
}
// Compute the equivalent pc in the new code.
byte* new_pc = new_code->instruction_start() + frame_offset +
- debug_break_slot_bytes + new_code_const_pool_size;
+ debug_break_slot_bytes + new_code_pool_size;
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -2360,7 +2364,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Continue just after the slot.
thread_local_.after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr))) {
+ } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
// We now know that there is still a debug break call at the target address,
// so the break point is still there and the original code will hold the
// address to jump to in order to complete the call which is replaced by a
@@ -2371,13 +2375,15 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Install jump to the call address in the original code. This will be the
// call which was overwritten by the call to DebugBreakXXX.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ thread_local_.after_break_target_ =
+ Assembler::target_address_at(addr, *original_code);
} else {
// There is no longer a break point present. Don't try to look in the
// original code as the running code will have the right address. This takes
// care of the case where the last break point is removed from the function
// and therefore no "original code" is available.
- thread_local_.after_break_target_ = Assembler::target_address_at(addr);
+ thread_local_.after_break_target_ =
+ Assembler::target_address_at(addr, *code);
}
}
@@ -2594,6 +2600,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
// Create the execution state object.
Handle<String> constructor_str =
isolate_->factory()->InternalizeUtf8String(constructor_name);
+ ASSERT(!constructor_str.is_null());
Handle<Object> constructor(
isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
isolate_);
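
The debug.cc hunks above generalize the PC-translation logic from constant pools alone to constant and veneer pools: when activations are redirected to freshly compiled code, pool bytes emitted before the frame's pc (each CONST_POOL or VENEER_POOL RelocInfo entry encodes its size in the data field) are subtracted to get a pool-free offset in the old code, and the matching position in the new code adds back that code's pool bytes plus any debug-break-slot bytes. A simplified, self-contained sketch of that arithmetic, using assumed toy structures rather than V8's RelocIterator API:

#include <cstdint>
#include <iostream>
#include <vector>

// One pool emitted in the instruction stream: where it sits and how many
// bytes it occupies (the size is what rinfo->data() encodes in the real code).
struct PoolEntry { intptr_t offset; int size; };

intptr_t PoolFreeOffset(intptr_t pc_offset, const std::vector<PoolEntry>& pools) {
  int pool_bytes = 0;
  for (const PoolEntry& p : pools) {
    if (p.offset >= pc_offset) break;  // only pools emitted before the pc count
    pool_bytes += p.size;
  }
  return pc_offset - pool_bytes;
}

int main() {
  // Old code: frame pc at offset 120, a 16-byte constant pool at offset 40.
  intptr_t frame_offset = PoolFreeOffset(120, {{40, 16}});
  // New code: 8 bytes of pools and 4 bytes of debug break slots precede the
  // equivalent position, so they are added back to land on the same point.
  int new_pool_bytes = 8;
  int debug_break_slot_bytes = 4;
  intptr_t new_pc_offset = frame_offset + new_pool_bytes + debug_break_slot_bytes;
  std::cout << "frame offset " << frame_offset
            << ", new pc offset " << new_pc_offset << "\n";
}

In the real code the new code's pool and debug-break-slot byte counts are accumulated by walking its RelocInfo until the translated offset catches up with the old frame offset; the sketch simply takes those totals as given.
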
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 29575d8c0..4d5e60573 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -342,7 +342,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
function->set_code(shared->code());
- shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
@@ -358,9 +357,41 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
+ Isolate* isolate = context->GetHeap()->isolate();
+#ifdef DEBUG
+ Code* topmost_optimized_code = NULL;
+ bool safe_to_deopt_topmost_optimized_code = false;
+ // Make sure all activations of optimized code can deopt at their current PC.
+ // The topmost optimized code has special handling because it cannot be
+ // deoptimized due to weak object dependency.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top());
+ !it.done(); it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (FLAG_trace_deopt) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer found activation of function: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int deopt_index = safepoint.deoptimization_index();
+ bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+ if (topmost_optimized_code == NULL) {
+ topmost_optimized_code = code;
+ safe_to_deopt_topmost_optimized_code = safe_to_deopt;
+ }
+ }
+ }
+#endif
+
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Isolate* isolate = context->GetHeap()->isolate();
Zone zone(isolate);
ZoneList<Code*> codes(10, &zone);
@@ -393,35 +424,17 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
-#ifdef DEBUG
- // Make sure all activations of optimized code can deopt at their current PC.
- for (StackFrameIterator it(isolate, isolate->thread_local_top());
- !it.done(); it.Advance()) {
- StackFrame::Type type = it.frame()->type();
- if (type == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
- if (FLAG_trace_deopt) {
- JSFunction* function =
- static_cast<OptimizedFrame*>(it.frame())->function();
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[deoptimizer patches for lazy deopt: ");
- function->PrintName(scope.file());
- PrintF(scope.file(),
- " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
- int deopt_index = safepoint.deoptimization_index();
- CHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
- }
- }
-#endif
-
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
+#ifdef DEBUG
+ if (codes[i] == topmost_optimized_code) {
+ ASSERT(safe_to_deopt_topmost_optimized_code);
+ }
+#endif
// It is finally time to die, code object.
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -755,6 +768,12 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
+
+ // Determine basic deoptimization information. The optimized frame is
+ // described by the input data.
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
+
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(),
@@ -763,7 +782,8 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
PrintF(trace_scope_->file(),
- " @%d, FP to SP delta: %d]\n",
+ " (opt #%d) @%d, FP to SP delta: %d]\n",
+ input_data->OptimizationId()->value(),
bailout_id_,
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@@ -771,10 +791,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- // Determine basic deoptimization information. The optimized frame is
- // described by the input data.
- DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -990,24 +1006,19 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (FLAG_enable_ool_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
- // from the input frame. For subsequent output frames, it can be gotten from
- // the function's code.
- Register constant_pool_reg =
- JavaScriptFrame::constant_pool_pointer_register();
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
- value = reinterpret_cast<intptr_t>(
- function->shared()->code()->constant_pool());
+ value = output_[frame_index - 1]->GetConstantPool();
}
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetConstantPool(value);
- if (is_topmost) output_frame->SetRegister(constant_pool_reg.code(), value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; constant_pool\n",
+ V8PRIxPTR "; caller's constant_pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1065,6 +1076,18 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
+ // Update constant pool.
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ JavaScriptFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
@@ -1148,15 +1171,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // A marker value is used in place of the constant pool.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
- intptr_t constant_pool = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, constant_pool);
+ value = output_[frame_index - 1]->GetConstantPool();
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool (adaptor sentinel)\n",
- top_address + output_offset, output_offset, constant_pool);
+ V8PRIxPTR "; caller's constant_pool\n",
+ top_address + output_offset, output_offset, value);
}
}
@@ -1203,6 +1225,11 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1278,13 +1305,13 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1365,6 +1392,11 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(construct_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1436,13 +1468,13 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the previous frame.
+ // Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
- output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetCallerConstantPool(output_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant pool\n",
+ V8PRIxPTR " ; caller's constant pool\n",
top_address + output_offset, output_offset, value);
}
}
@@ -1504,6 +1536,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
+ if (FLAG_enable_ool_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ }
}
@@ -1607,17 +1644,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
if (FLAG_enable_ool_constant_pool) {
- // The constant pool pointer can be gotten from the input frame.
- Register constant_pool_pointer_register =
- StubFailureTrampolineFrame::constant_pool_pointer_register();
+ // Read the caller's constant pool from the input frame.
input_frame_offset -= kPointerSize;
value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(constant_pool_pointer_register.code(), value);
output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
+ output_frame->SetCallerConstantPool(output_frame_offset, value);
if (trace_scope_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; constant_pool_pointer\n",
+ V8PRIxPTR " ; caller's constant_pool\n",
top_address + output_frame_offset, output_frame_offset, value);
}
}
@@ -1751,6 +1785,14 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
+ if (FLAG_enable_ool_constant_pool) {
+ Register constant_pool_reg =
+ StubFailureTrampolineFrame::constant_pool_pointer_register();
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
@@ -2716,6 +2758,9 @@ FrameDescription::FrameDescription(uint32_t frame_size,
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
+ // TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
+ // isn't used before the next safepoint, the GC will try to scan it as a
+ // tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
@@ -3306,6 +3351,13 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
// tagged and skip materializing the HeapNumber explicitly.
Handle<Object> object = GetNext(isolate, lvl + 1);
materialized_objects_.Add(object);
+ // On 32-bit architectures, there is an extra slot there because
+ // the escape analysis calculates the number of slots as
+ // object-size/pointer-size. To account for this, we read out
+ // any extra slots.
+ for (int i = 0; i < length - 2; i++) {
+ GetNext(isolate, lvl + 1);
+ }
return object;
}
case JS_OBJECT_TYPE: {
@@ -3360,7 +3412,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
void SlotRefValueBuilder::Finish(Isolate* isolate) {
- // We should have processed all slot
+ // We should have processed all the slots
ASSERT(slot_refs_.length() == current_slot_);
if (materialized_objects_.length() > prev_materialized_count_) {
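
With FLAG_enable_ool_constant_pool, the deoptimizer.cc hunks make every translated output frame store its caller's constant pool, read from the previous output frame (or from the input frame for the bottommost one) via the new SetCallerConstantPool(), and set its own constant pool from the code object that will run in it; the topmost frame additionally materializes that value in the constant pool register. A toy sketch of that bookkeeping, using illustrative structures rather than V8's FrameDescription:

#include <cstdint>
#include <iostream>
#include <vector>

struct OutputFrame {
  intptr_t caller_constant_pool = 0;  // written into the frame slot
  intptr_t constant_pool = 0;         // pool of the code running in this frame
};

int main() {
  // Constant pools of the unoptimized code objects for three stacked frames,
  // bottommost first (arbitrary addresses, for illustration only).
  std::vector<intptr_t> code_pools = {0x1000, 0x2000, 0x3000};
  std::vector<OutputFrame> output(code_pools.size());
  intptr_t input_frame_caller_pool = 0x0500;  // read from the input frame

  for (size_t i = 0; i < output.size(); ++i) {
    output[i].caller_constant_pool =
        (i == 0) ? input_frame_caller_pool       // bottommost: input frame
                 : output[i - 1].constant_pool;  // otherwise: previous frame
    output[i].constant_pool = code_pools[i];     // from the target code object
  }
  // The topmost frame's own pool would also be placed in the constant pool
  // register before execution resumes.
  std::cout << std::hex << "topmost caller pool: 0x"
            << output.back().caller_constant_pool << "\n";
}
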
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 67690ded0..a36362fc9 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -134,7 +134,7 @@ class Deoptimizer : public Malloced {
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
- struct JumpTableEntry {
+ struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry,
Deoptimizer::BailoutType type,
bool frame)
@@ -508,6 +508,8 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
+ void SetCallerConstantPool(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
#if DEBUG
// This convoluted ASSERT is needed to work around a gcc problem that
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index f02d43ad8..2af64228c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -200,7 +200,7 @@ static int DecodeIt(Isolate* isolate,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
// Indent the printing of the reloc info.
if (i == 0) {
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index d2abb0442..ff458e0ea 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -66,7 +66,7 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
}
UNREACHABLE();
@@ -142,14 +142,27 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
}
+ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
+ switch (kind) {
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ default: {
+ int index = GetSequenceIndexFromFastElementsKind(kind);
+ return GetFastElementsKindFromSequenceIndex(index + 1);
+ }
+ }
+}
+
+
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
- int index =
- GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
- elements_kind = GetFastElementsKindFromSequenceIndex(index);
+ elements_kind = GetNextTransitionElementsKind(elements_kind);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
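
The new GetNextTransitionElementsKind() covers two cases: a fixed typed-array kind transitions to its EXTERNAL_* counterpart, and any other (fast) kind advances one step along the fast elements-kind sequence, which GetNextMoreGeneralFastElementsKind() then walks. A reduced sketch of that shape with a toy enum; the values below are not V8's ElementsKind constants.

#include <cassert>
#include <iostream>

enum Kind { FAST_SMI, FAST_HOLEY_SMI, FAST_DOUBLE, FAST_HOLEY_DOUBLE,
            FAST_OBJECT, FAST_HOLEY_OBJECT,   // the "fast" sequence, in order
            UINT8, EXTERNAL_UINT8 };          // one typed-array pair

Kind GetNextTransitionKind(Kind kind) {
  switch (kind) {
    case UINT8: return EXTERNAL_UINT8;   // typed array -> external array
    case FAST_HOLEY_OBJECT:              // terminal fast kind: no successor
      assert(false);
      return FAST_HOLEY_OBJECT;
    default:                             // next entry in the fast sequence
      return static_cast<Kind>(static_cast<int>(kind) + 1);
  }
}

int main() {
  std::cout << GetNextTransitionKind(FAST_SMI) << " "   // FAST_HOLEY_SMI (1)
            << GetNextTransitionKind(UINT8) << "\n";    // EXTERNAL_UINT8 (7)
}
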
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 5a3f00dcc..d2605e8b0 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -51,7 +51,7 @@ enum ElementsKind {
// The "slow" kind.
DICTIONARY_ELEMENTS,
- NON_STRICT_ARGUMENTS_ELEMENTS,
+ SLOPPY_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_INT8_ELEMENTS,
EXTERNAL_UINT8_ELEMENTS,
@@ -100,10 +100,10 @@ void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
-ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
-
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+ElementsKind GetNextTransitionElementsKind(ElementsKind elements_kind);
inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
@@ -116,6 +116,12 @@ inline bool IsExternalArrayElementsKind(ElementsKind kind) {
}
+inline bool IsTerminalElementsKind(ElementsKind kind) {
+ return kind == TERMINAL_FAST_ELEMENTS_KIND ||
+ IsExternalArrayElementsKind(kind);
+}
+
+
inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
@@ -128,6 +134,11 @@ inline bool IsFastElementsKind(ElementsKind kind) {
}
+inline bool IsTransitionElementsKind(ElementsKind kind) {
+ return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind);
+}
+
+
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 2e4667d4a..0624a0362 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -68,7 +68,7 @@
// - FixedFloat64ElementsAccessor
// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
-// - NonStrictArgumentsElementsAccessor
+// - SloppyArgumentsElementsAccessor
namespace v8 {
@@ -95,7 +95,7 @@ static const int kPackedSizeNotKnown = -1;
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
- V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
+ V(SloppyArgumentsElementsAccessor, SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, \
ExternalInt8Array) \
@@ -160,18 +160,18 @@ static bool HasKey(FixedArray* array, Object* key) {
}
-static Failure* ThrowArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *heap->isolate()->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+static Handle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
+ isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+ return Handle<Object>();
}
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
+static void CopyObjectToObjectElements(Handle<FixedArrayBase> from_base,
ElementsKind from_kind,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@@ -189,7 +189,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
@@ -197,8 +197,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
@@ -209,23 +209,24 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
-static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
+static void CopyDictionaryToObjectElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
@@ -238,15 +239,15 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
- ASSERT(to_base != from_base);
+ ASSERT(*to_base != *from_base);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -262,23 +263,22 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
}
}
if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
+ if (!heap->InNewSpace(*to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to);
+ heap->incremental_marking()->RecordWrites(*to);
}
}
-MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedArrayBase* from_base,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
+static void CopyDoubleToObjectElements(Handle<FixedArrayBase> from_base,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to_base,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -294,49 +294,35 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return from_base;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ if (copy_size == 0) return;
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
for (int i = 0; i < copy_size; ++i) {
+ HandleScope scope(from_base->GetIsolate());
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
- return Failure::Exception();
} else {
- MaybeObject* maybe_value = from->get(i + from_start);
- Object* value;
ASSERT(IsFastObjectElementsKind(to_kind));
- // Because Double -> Object elements transitions allocate HeapObjects
- // iteratively, the allocate must succeed within a single GC cycle,
- // otherwise the retry after the GC will also fail. In order to ensure
- // that no GC is triggered, allocate HeapNumbers from old space if they
- // can't be taken from new space.
- if (!maybe_value->ToObject(&value)) {
- ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from->GetHeap();
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from->get_scalar(i + from_start),
- TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
- }
- to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+ Handle<Object> value = from->get_as_handle(i + from_start);
+ to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
}
}
- return to;
}
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
+static void CopyDoubleToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -345,15 +331,15 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -365,11 +351,12 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
}
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
+static void CopySmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -377,20 +364,20 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
Object* hole_or_smi = from->get(from_start);
- if (hole_or_smi == the_hole) {
+ if (hole_or_smi == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, Smi::cast(hole_or_smi)->value());
@@ -399,12 +386,13 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
+static void CopyPackedSmiToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
@@ -414,7 +402,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
to_end = to_base->length();
for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -427,8 +415,8 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
@@ -438,11 +426,12 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
+static void CopyObjectToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -450,20 +439,20 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
- Object* the_hole = from->GetHeap()->the_hole_value();
+ Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+ Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
Object* hole_or_object = from->get(from_start);
- if (hole_or_object == the_hole) {
+ if (hole_or_object == *the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, hole_or_object->Number());
@@ -472,12 +461,14 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
+static void CopyDictionaryToDoubleElements(Handle<FixedArrayBase> from_base,
uint32_t from_start,
- FixedArrayBase* to_base,
+ Handle<FixedArrayBase> to_base,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ Handle<SeededNumberDictionary> from =
+ Handle<SeededNumberDictionary>::cast(from_base);
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
@@ -485,12 +476,12 @@ static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+ Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
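
The elements.cc changes in this region are largely mechanical handlification: the copy helpers now take Handle<FixedArrayBase> instead of raw pointers, add DisallowHeapAllocation guards where no allocation may occur, and dereference handles (*to) only where a raw pointer is momentarily needed. The point of a handle is that it is one indirection removed from the object, so a moving GC can relocate the object without leaving the reference stale. A self-contained toy illustration of that property, not V8's Handle implementation:

#include <iostream>

struct HeapObject { int value; };

// Toy "handle": refers to a slot the collector rewrites, not to the object.
struct ToyHandle {
  HeapObject** location;
  HeapObject* operator*() const { return *location; }
};

int main() {
  HeapObject old_space{41};
  HeapObject* slot = &old_space;        // the GC-managed slot
  ToyHandle handle{&slot};
  HeapObject* raw = slot;               // raw pointer captured up front

  HeapObject new_space{42};
  slot = &new_space;                    // "GC" moves the object, updates slot

  std::cout << "raw sees " << raw->value             // stale: 41
            << ", handle sees " << (*handle)->value  // current: 42
            << "\n";
}
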
@@ -592,7 +583,9 @@ class ElementsAccessorBase : public ElementsAccessor {
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
- virtual ElementsKind kind() const { return ElementsTraits::Kind; }
+ virtual ElementsKind kind() const V8_FINAL V8_OVERRIDE {
+ return ElementsTraits::Kind;
+ }
static void ValidateContents(JSObject* holder, int length) {
}
@@ -616,7 +609,7 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateContents(holder, length);
}
- virtual void Validate(JSObject* holder) {
+ virtual void Validate(JSObject* holder) V8_FINAL V8_OVERRIDE {
ElementsAccessorSubclass::ValidateImpl(holder);
}
@@ -631,7 +624,7 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual bool HasElement(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -639,10 +632,24 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, backing_store);
}
- MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT virtual Handle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+ CALL_HEAP_FUNCTION(holder->GetIsolate(),
+ Get(*receiver, *holder, key,
+ backing_store.is_null()
+ ? NULL : *backing_store),
+ Object);
+ }
+
+ MUST_USE_RESULT virtual MaybeObject* Get(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -674,7 +681,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -697,7 +704,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -721,7 +728,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ FixedArrayBase* backing_store) V8_FINAL V8_OVERRIDE {
if (backing_store == NULL) {
backing_store = holder->elements();
}
@@ -737,73 +744,73 @@ class ElementsAccessorBase : public ElementsAccessor {
return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
- Object* length) {
+ MUST_USE_RESULT virtual Handle<Object> SetLength(
+ Handle<JSArray> array,
+ Handle<Object> length) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, array->elements());
+ array, length, handle(array->elements()));
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store);
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store);
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
- JSArray* array,
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
int capacity,
- int length) {
- return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
- array,
- capacity,
- length);
+ int length) V8_FINAL V8_OVERRIDE {
+ ElementsAccessorSubclass::
+ SetFastElementsCapacityAndLength(array, capacity, length);
}
- MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
- JSObject* obj,
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
int capacity,
int length) {
UNIMPLEMENTED();
- return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
-
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_OVERRIDE = 0;
+
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
- MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
- uint32_t from_start,
- ElementsKind from_kind,
- FixedArrayBase* to,
- uint32_t to_start,
- int copy_size,
- FixedArrayBase* from) {
+ virtual void CopyElements(
+ Handle<JSObject> from_holder,
+ uint32_t from_start,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> to,
+ uint32_t to_start,
+ int copy_size,
+ Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE {
int packed_size = kPackedSizeNotKnown;
- if (from == NULL) {
- from = from_holder->elements();
+ if (from.is_null()) {
+ from = handle(from_holder->elements());
}
- if (from_holder) {
+ if (!from_holder.is_null()) {
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
if (is_packed) {
- packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
+ packed_size =
+ Smi::cast(Handle<JSArray>::cast(from_holder)->length())->value();
if (copy_size >= 0 && packed_size > copy_size) {
packed_size = copy_size;
}
}
}
- return ElementsAccessorSubclass::CopyElementsImpl(
+ ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, from_kind, to_start, packed_size, copy_size);
}
@@ -811,7 +818,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* receiver,
JSObject* holder,
FixedArray* to,
- FixedArrayBase* from) {
+ FixedArrayBase* from) V8_FINAL V8_OVERRIDE {
int len0 = to->length();
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
@@ -889,7 +896,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return backing_store->length();
}
- virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
+ virtual uint32_t GetCapacity(FixedArrayBase* backing_store)
+ V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
@@ -899,7 +907,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index) V8_FINAL V8_OVERRIDE {
return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
@@ -920,34 +928,34 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
// Adjusts the length of the fast backing store or returns the new length or
// undefined in case conversion to a slow backing store should be performed.
- static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
- JSArray* array,
- Object* length_object,
- uint32_t length) {
+ static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> backing_store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
+ uint32_t length) {
+ Isolate* isolate = array->GetIsolate();
uint32_t old_capacity = backing_store->length();
- Object* old_length = array->length();
+ Handle<Object> old_length(array->length(), isolate);
bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
+ static_cast<uint32_t>(Handle<Smi>::cast(old_length)->value()) >= length;
ElementsKind kind = array->GetElementsKind();
if (!same_or_smaller_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::TransitionElementsKind(array, kind);
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastSmiOrObjectElements()) {
- MaybeObject* maybe_obj = array->EnsureWritableFastElements();
- if (!maybe_obj->To(&backing_store)) return maybe_obj;
+ backing_store = JSObject::EnsureWritableFastElements(array);
}
if (2 * length <= old_capacity) {
// If more than half the elements won't be used, trim the array.
@@ -964,7 +972,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- BackingStore::cast(backing_store)->set_the_hole(i);
+ Handle<BackingStore>::cast(backing_store)->set_the_hole(i);
}
}
return length_object;
@@ -974,53 +982,48 @@ class FastElementsAccessor
uint32_t min = JSObject::NewElementsCapacity(old_capacity);
uint32_t new_capacity = length > min ? length : min;
if (!array->ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result = FastElementsAccessorSubclass::
+ FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
- if (result->IsFailure()) return result;
array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
- return array->GetHeap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- static MaybeObject* DeleteCommon(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ static Handle<Object> DeleteCommon(Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = obj->GetHeap();
- Object* elements = obj->elements();
- if (elements == heap->empty_fixed_array()) {
- return heap->true_value();
- }
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- bool is_non_strict_arguments_elements_map =
- backing_store->map() == heap->non_strict_arguments_elements_map();
- if (is_non_strict_arguments_elements_map) {
- backing_store = KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ Handle<FixedArrayBase> elements(obj->elements());
+ if (*elements == heap->empty_fixed_array()) {
+ return isolate->factory()->true_value();
+ }
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
+ bool is_sloppy_arguments_elements_map =
+ backing_store->map() == heap->sloppy_arguments_elements_map();
+ if (is_sloppy_arguments_elements_map) {
+ backing_store = handle(
+ BackingStore::cast(Handle<FixedArray>::cast(backing_store)->get(1)));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
- ? Smi::cast(JSArray::cast(obj)->length())->value()
+ ? Smi::cast(Handle<JSArray>::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
- if (!is_non_strict_arguments_elements_map) {
+ if (!is_sloppy_arguments_elements_map) {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
+ JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind));
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
+ Handle<Object> writable = JSObject::EnsureWritableFastElements(obj);
+ backing_store = Handle<BackingStore>::cast(writable);
}
}
backing_store->set_the_hole(key);
@@ -1030,7 +1033,7 @@ class FastElementsAccessor
// one adjacent hole to the value being deleted.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
- !heap->InNewSpace(backing_store) &&
+ !heap->InNewSpace(*backing_store) &&
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
@@ -1040,17 +1043,17 @@ class FastElementsAccessor
if (4 * num_used > backing_store->length()) break;
}
if (4 * num_used <= backing_store->length()) {
- MaybeObject* result = obj->NormalizeElements();
- if (result->IsFailure()) return result;
+ JSObject::NormalizeElements(obj);
}
}
}
- return heap->true_value();
+ return isolate->factory()->true_value();
}
- virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1077,8 +1080,7 @@ class FastElementsAccessor
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
- typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
+ BackingStore* backing_store = BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
@@ -1128,13 +1130,13 @@ class FastSmiOrObjectElementsAccessor
KindTraits,
kPointerSize>(name) {}
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
ElementsKind to_kind = KindTraits::Kind;
switch (from_kind) {
case FAST_SMI_ELEMENTS:
@@ -1143,24 +1145,27 @@ class FastSmiOrObjectElementsAccessor
case FAST_HOLEY_ELEMENTS:
CopyObjectToObjectElements(
from, from_kind, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
+ break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return CopyDoubleToObjectElements(
+ CopyDoubleToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
+ break;
case DICTIONARY_ELEMENTS:
CopyDictionaryToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
- return to->GetHeap()->undefined_value();
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ break;
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// TODO(verwaest): This is a temporary hack to support extending
- // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+ // SLOPPY_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
// This case should be UNREACHABLE().
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- ElementsKind from_kind = ElementsKindForArray(arguments);
- return CopyElementsImpl(arguments, from_start, to, from_kind,
- to_start, packed_size, copy_size);
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(from);
+ Handle<FixedArrayBase> arguments(
+ FixedArrayBase::cast(parameter_map->get(1)));
+ ElementsKind from_kind = ElementsKindForArray(*arguments);
+ CopyElementsImpl(arguments, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
+ break;
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
@@ -1169,20 +1174,19 @@ class FastSmiOrObjectElementsAccessor
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
- return NULL;
}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
+ static void SetFastElementsCapacityAndLength(
+ Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
- return obj->SetFastElementsCapacityAndLength(capacity,
- length,
- set_capacity_mode);
+ JSObject::SetFastElementsCapacityAndLength(
+ obj, capacity, length, set_capacity_mode);
}
};
@@ -1247,21 +1251,20 @@ class FastDoubleElementsAccessor
KindTraits,
kDoubleSize>(name) {}
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity,
- length);
+ static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
+ uint32_t capacity,
+ uint32_t length) {
+ JSObject::SetFastDoubleElementsCapacityAndLength(obj, capacity, length);
}
protected:
- static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
CopyPackedSmiToDoubleElements(
@@ -1282,7 +1285,7 @@ class FastDoubleElementsAccessor
CopyDictionaryToDoubleElements(
from, from_start, to, to_start, copy_size);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -1292,7 +1295,6 @@ class FastDoubleElementsAccessor
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
- return to->GetHeap()->undefined_value();
}
};
@@ -1373,20 +1375,21 @@ class TypedElementsAccessor
? FIELD : NONEXISTENT;
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
// External arrays always ignore deletes.
- return obj->GetHeap()->true_value();
+ return obj->GetIsolate()->factory()->true_value();
}
static bool HasElementImpl(Object* receiver,
@@ -1484,6 +1487,18 @@ class DictionaryElementsAccessor
return length_object;
}
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT static Handle<Object> SetLengthWithoutNormalize(
+ Handle<FixedArrayBase> store,
+ Handle<JSArray> array,
+ Handle<Object> length_object,
+ uint32_t length) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ SetLengthWithoutNormalize(
+ *store, *array, *length_object, length),
+ Object);
+ }
+
MUST_USE_RESULT static MaybeObject* DeleteCommon(
JSObject* obj,
uint32_t key,
@@ -1492,7 +1507,7 @@ class DictionaryElementsAccessor
Heap* heap = isolate->heap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
bool is_arguments =
- (obj->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS);
+ (obj->GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS);
if (is_arguments) {
backing_store = FixedArray::cast(backing_store->get(1));
}
@@ -1529,15 +1544,24 @@ class DictionaryElementsAccessor
return heap->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ // TODO(ishell): Temporary wrapper until handlified.
+ MUST_USE_RESULT static Handle<Object> DeleteCommon(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ DeleteCommon(*obj, key, mode),
+ Object);
+ }
+
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
@@ -1545,9 +1569,10 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
return DeleteCommon(obj, key, mode);
}
@@ -1632,18 +1657,18 @@ class DictionaryElementsAccessor
};
-class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
+class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> > {
public:
- explicit NonStrictArgumentsElementsAccessor(const char* name)
+ explicit SloppyArgumentsElementsAccessor(const char* name)
: ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
protected:
friend class ElementsAccessorBase<
- NonStrictArgumentsElementsAccessor,
- ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
+ SloppyArgumentsElementsAccessor,
+ ElementsKindTraits<SLOPPY_ARGUMENTS_ELEMENTS> >;
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
@@ -1727,28 +1752,30 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
- MUST_USE_RESULT static MaybeObject* SetLengthImpl(
- JSObject* obj,
- Object* length,
- FixedArrayBase* parameter_map) {
+ MUST_USE_RESULT static Handle<Object> SetLengthImpl(
+ Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
return obj;
}
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key,
- JSReceiver::DeleteMode mode) {
- FixedArray* parameter_map = FixedArray::cast(obj->elements());
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> obj,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
+ Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
// TODO(kmillikin): We could check if this was the last aliased
// parameter, and revert to normal elements in that case. That
// would enable GC of the context.
parameter_map->set_the_hole(key + 2);
} else {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
@@ -1758,18 +1785,17 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
- return obj->GetHeap()->true_value();
+ return isolate->factory()->true_value();
}
- MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
- uint32_t from_start,
- FixedArrayBase* to,
- ElementsKind from_kind,
- uint32_t to_start,
- int packed_size,
- int copy_size) {
+ static void CopyElementsImpl(Handle<FixedArrayBase> from,
+ uint32_t from_start,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ uint32_t to_start,
+ int packed_size,
+ int copy_size) {
UNREACHABLE();
- return NULL;
}
static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
@@ -1801,6 +1827,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
private:
+ // TODO(ishell): remove when all usages are handlified.
static Object* GetParameterMapArg(JSObject* holder,
FixedArray* parameter_map,
uint32_t key) {
@@ -1811,6 +1838,18 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
+
+ static Handle<Object> GetParameterMapArg(Handle<JSObject> holder,
+ Handle<FixedArray> parameter_map,
+ uint32_t key) {
+ Isolate* isolate = holder->GetIsolate();
+ uint32_t length = holder->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(holder)->length())->value()
+ : parameter_map->length();
+ return key < (length - 2)
+ ? handle(parameter_map->get(key + 2), isolate)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value());
+ }
};
@@ -1842,30 +1881,39 @@ void ElementsAccessor::TearDown() {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
- ElementsKindTraits>::
- SetLengthImpl(JSObject* obj,
- Object* length,
- FixedArrayBase* backing_store) {
- JSArray* array = JSArray::cast(obj);
+MUST_USE_RESULT Handle<Object> ElementsAccessorBase<ElementsAccessorSubclass,
+ ElementsKindTraits>::
+ SetLengthImpl(Handle<JSObject> obj,
+ Handle<Object> length,
+ Handle<FixedArrayBase> backing_store) {
+ Isolate* isolate = obj->GetIsolate();
+ Handle<JSArray> array = Handle<JSArray>::cast(obj);
// Fast case: The new length fits into a Smi.
- MaybeObject* maybe_smi_length = length->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
+ Handle<Object> smi_length = Object::ToSmi(isolate, length);
+
+ if (!smi_length.is_null() && smi_length->IsSmi()) {
+ const int value = Handle<Smi>::cast(smi_length)->value();
if (value >= 0) {
- Object* new_length;
- MaybeObject* result = ElementsAccessorSubclass::
+ Handle<Object> new_length = ElementsAccessorSubclass::
SetLengthWithoutNormalize(backing_store, array, smi_length, value);
- if (!result->ToObject(&new_length)) return result;
- ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length);
+
+ // even though the proposed length was a smi, new_length could
+ // still be a heap number because SetLengthWithoutNormalize doesn't
+ // allow the array length property to drop below the index of
+ // non-deletable elements.
+ ASSERT(new_length->IsSmi() || new_length->IsHeapNumber() ||
+ new_length->IsUndefined());
if (new_length->IsSmi()) {
- array->set_length(Smi::cast(new_length));
+ array->set_length(*Handle<Smi>::cast(new_length));
+ return array;
+ } else if (new_length->IsHeapNumber()) {
+ array->set_length(*new_length);
return array;
}
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
@@ -1874,97 +1922,89 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
if (length->IsNumber()) {
uint32_t value;
if (length->ToArrayIndex(&value)) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = array->NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
- Object* new_length;
- MaybeObject* result = DictionaryElementsAccessor::
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(array);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dictionary, dictionary);
+
+ Handle<Object> new_length = DictionaryElementsAccessor::
SetLengthWithoutNormalize(dictionary, array, length, value);
- if (!result->ToObject(&new_length)) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, new_length, new_length);
+
ASSERT(new_length->IsNumber());
- array->set_length(new_length);
+ array->set_length(*new_length);
return array;
} else {
- return ThrowArrayLengthRangeError(array->GetHeap());
+ return ThrowArrayLengthRangeError(isolate);
}
}
// Fall-back case: The new length is not a number so make the array
// size one and set only element to length.
- FixedArray* new_backing_store;
- MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
- new_backing_store->set(0, length);
- { MaybeObject* result = array->SetContent(new_backing_store);
- if (result->IsFailure()) return result;
- }
+ Handle<FixedArray> new_backing_store = isolate->factory()->NewFixedArray(1);
+ new_backing_store->set(0, *length);
+ JSArray::SetContent(array, new_backing_store);
return array;
}
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args) {
- Heap* heap = array->GetIsolate()->heap();
-
+Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args) {
// Optimize the case where there is one argument and the argument is a
// small smi.
if (args->length() == 1) {
- Object* obj = (*args)[0];
+ Handle<Object> obj = args->at<Object>(0);
if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
+ int len = Handle<Smi>::cast(obj)->value();
if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
ElementsKind elements_kind = array->GetElementsKind();
- MaybeObject* maybe_array = array->Initialize(len, len);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSArray::Initialize(array, len, len);
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
- maybe_array = array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, elements_kind);
}
-
return array;
} else if (len == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
}
// Take the argument as the length.
- MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->To(&obj)) return maybe_obj;
+ JSArray::Initialize(array, 0);
- return array->SetElementsLength((*args)[0]);
+ return JSArray::SetElementsLength(array, obj);
}
// Optimize the case where there are no parameters passed.
if (args->length() == 0) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
+ JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
+ return array;
}
+ Factory* factory = array->GetIsolate()->factory();
+
// Set length and elements on the array.
int number_of_elements = args->length();
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 0, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
+ JSObject::EnsureCanContainElements(
+ array, args, 0, number_of_elements, ALLOW_CONVERTED_DOUBLE_ELEMENTS);
// Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
+ Handle<FixedArrayBase> elms;
if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedDoubleArray(number_of_elements));
} else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ elms = Handle<FixedArrayBase>::cast(
+ factory->NewFixedArrayWithHoles(number_of_elements));
}
- FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
+ Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
}
@@ -1974,7 +2014,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
case FAST_ELEMENTS: {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
+ Handle<FixedArray> object_elms = Handle<FixedArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
object_elms->set(index, (*args)[index], mode);
}
@@ -1982,7 +2022,8 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ Handle<FixedDoubleArray> double_elms =
+ Handle<FixedDoubleArray>::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
double_elms->set(index, (*args)[index]->Number());
}
@@ -1993,7 +2034,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
break;
}
- array->set_elements(elms);
+ array->set_elements(*elms);
array->set_length(Smi::FromInt(number_of_elements));
return array;
}
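
The elements.cc hunks above follow the handlification pattern used throughout this upgrade: raw-pointer entry points returning MaybeObject* gain Handle<>-based twins, with CALL_HEAP_FUNCTION bridging the two until all callers are converted (see the "Temporary wrapper until handlified" TODOs). A minimal sketch of that bridge, assuming the V8-internal CALL_HEAP_FUNCTION macro and handle types from this tree; the function names below are illustrative, not part of the diff:

    // Raw-pointer implementation; may allocate and therefore return a failure.
    static MaybeObject* DeleteRaw(JSObject* obj, uint32_t key);

    // Temporary handlified wrapper: CALL_HEAP_FUNCTION retries the raw call
    // after a GC on allocation failure and returns the result as a Handle.
    static Handle<Object> Delete(Handle<JSObject> obj, uint32_t key) {
      CALL_HEAP_FUNCTION(obj->GetIsolate(),
                         DeleteRaw(*obj, key),
                         Object);
    }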
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 6353aaecf..44644abd9 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -65,6 +65,13 @@ class ElementsAccessor {
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual Handle<Object> Get(
+ Handle<Object> receiver,
+ Handle<JSObject> holder,
+ uint32_t key,
+ Handle<FixedArrayBase> backing_store =
+ Handle<FixedArrayBase>::null()) = 0;
+
MUST_USE_RESULT virtual MaybeObject* Get(
Object* receiver,
JSObject* holder,
@@ -109,8 +116,9 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
// have non-deletable elements can only be shrunk to the size of highest
// element that is non-deletable.
- MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
- Object* new_length) = 0;
+ MUST_USE_RESULT virtual Handle<Object> SetLength(
+ Handle<JSArray> holder,
+ Handle<Object> new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
// backing store as necessary. This method does NOT honor the semantics of
@@ -118,14 +126,16 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that use InternalArrays and don't care about
// EcmaScript 5.1 semantics.
- MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
- int capacity,
- int length) = 0;
+ virtual void SetCapacityAndLength(
+ Handle<JSArray> array,
+ int capacity,
+ int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
- MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
- uint32_t key,
- JSReceiver::DeleteMode mode) = 0;
+ MUST_USE_RESULT virtual Handle<Object> Delete(
+ Handle<JSObject> holder,
+ uint32_t key,
+ JSReceiver::DeleteMode mode) = 0;
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
@@ -140,21 +150,22 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
- MUST_USE_RESULT virtual MaybeObject* CopyElements(
- JSObject* source_holder,
+ virtual void CopyElements(
+ Handle<JSObject> source_holder,
uint32_t source_start,
ElementsKind source_kind,
- FixedArrayBase* destination,
+ Handle<FixedArrayBase> destination,
uint32_t destination_start,
int copy_size,
- FixedArrayBase* source = NULL) = 0;
-
- MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
- FixedArrayBase* to,
- ElementsKind from_kind,
- FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, from_kind, to, 0,
- kCopyToEndAndInitializeToHole, from);
+ Handle<FixedArrayBase> source = Handle<FixedArrayBase>::null()) = 0;
+
+ void CopyElements(
+ Handle<JSObject> from_holder,
+ Handle<FixedArrayBase> to,
+ ElementsKind from_kind,
+ Handle<FixedArrayBase> from = Handle<FixedArrayBase>::null()) {
+ CopyElements(from_holder, 0, from_kind, to, 0,
+ kCopyToEndAndInitializeToHole, from);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -175,7 +186,7 @@ class ElementsAccessor {
static void TearDown();
protected:
- friend class NonStrictArgumentsElementsAccessor;
+ friend class SloppyArgumentsElementsAccessor;
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
@@ -200,8 +211,8 @@ class ElementsAccessor {
void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
bool allow_appending = false);
-MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
- JSArray* array, Arguments* args);
+Handle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
+ Arguments* args);
} } // namespace v8::internal
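
With the header changes above, ElementsAccessor exposes Handle-based virtuals alongside the remaining raw MaybeObject* ones, and the two-argument CopyElements overload simply forwards with kCopyToEndAndInitializeToHole. A hedged sketch of a call site against the handlified interface, assuming a live Handle<JSArray> and only the declarations shown in this hunk:

    void ShrinkArray(Handle<JSArray> array, Handle<Object> new_length) {
      // GetElementsAccessor() returns the accessor matching the array's
      // current ElementsKind; SetLength applies the EcmaScript 5.1
      // 15.4.5.2 rules for non-deletable elements.
      ElementsAccessor* accessor = array->GetElementsAccessor();
      Handle<Object> result = accessor->SetLength(array, new_length);
      ASSERT(!result.is_null());
    }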
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index da2d880a4..7442d1732 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -77,6 +77,13 @@ static Handle<Object> Invoke(bool is_construct,
// Entering JavaScript.
VMState<JS> state(isolate);
+ CHECK(AllowJavascriptExecution::IsAllowed(isolate));
+ if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
+ isolate->ThrowIllegalOperation();
+ *has_pending_exception = true;
+ isolate->ReportPendingMessages();
+ return Handle<Object>();
+ }
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
@@ -128,11 +135,6 @@ static Handle<Object> Invoke(bool is_construct,
ASSERT(*has_pending_exception == isolate->has_pending_exception());
if (*has_pending_exception) {
isolate->ReportPendingMessages();
- if (isolate->pending_exception()->IsOutOfMemory()) {
- if (!isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("JS", true);
- }
- }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Reset stepping state when script exits with uncaught exception.
if (isolate->debugger()->IsDebuggerActive()) {
@@ -163,9 +165,10 @@ Handle<Object> Execution::Call(Isolate* isolate,
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
- // In non-strict mode, convert receiver.
+ // In sloppy mode, convert receiver.
if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && func->shared()->is_classic_mode()) {
+ !func->shared()->native() &&
+ func->shared()->strict_mode() == SLOPPY) {
if (receiver->IsUndefined() || receiver->IsNull()) {
Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -217,9 +220,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
ASSERT(catcher.HasCaught());
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
- if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
- }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -368,6 +368,20 @@ void Execution::RunMicrotasks(Isolate* isolate) {
}
+void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
+ bool threw = false;
+ Handle<Object> args[] = { microtask };
+ Execution::Call(
+ isolate,
+ isolate->enqueue_external_microtask(),
+ isolate->factory()->undefined_value(),
+ 1,
+ args,
+ &threw);
+ ASSERT(!threw);
+}
+
+
bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -502,15 +516,15 @@ void StackGuard::FullDeopt() {
}
-bool StackGuard::IsDeoptMarkedCode() {
+bool StackGuard::IsDeoptMarkedAllocationSites() {
ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & DEOPT_MARKED_CODE) != 0;
+ return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0;
}
-void StackGuard::DeoptMarkedCode() {
+void StackGuard::DeoptMarkedAllocationSites() {
ExecutionAccess access(isolate_);
- thread_local_.interrupt_flags_ |= DEOPT_MARKED_CODE;
+ thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES;
set_interrupt_limits(access);
}
@@ -797,10 +811,10 @@ Handle<JSFunction> Execution::InstantiateFunction(
if (!data->do_not_cache()) {
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(isolate, serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ Handle<JSObject> cache(isolate->native_context()->function_cache());
+ Handle<Object> elm =
+ Object::GetElementNoExceptionThrown(isolate, cache, serial_number);
+ if (elm->IsJSFunction()) return Handle<JSFunction>::cast(elm);
}
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
@@ -1026,9 +1040,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
- if (stack_guard->IsDeoptMarkedCode()) {
- stack_guard->Continue(DEOPT_MARKED_CODE);
- Deoptimizer::DeoptimizeMarkedCode(isolate);
+ if (stack_guard->IsDeoptMarkedAllocationSites()) {
+ stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES);
+ isolate->heap()->DeoptMarkedAllocationSites();
}
if (stack_guard->IsInstallCodeRequest()) {
ASSERT(isolate->concurrent_recompilation_enabled());
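
The new Execution::EnqueueMicrotask above routes the task through Execution::Call into the context's enqueue_external_microtask function and asserts that the enqueue itself does not throw. A hedged sketch of how a caller might pair it with RunMicrotasks, assuming a callable handle is already in hand; the wrapper name is illustrative:

    void QueueAndDrain(Isolate* isolate, Handle<JSFunction> task) {
      // Queue the callable; the ASSERT inside EnqueueMicrotask guards
      // against the enqueue call itself throwing.
      Execution::EnqueueMicrotask(isolate, task);
      // Later, at a safe point, drain everything that was queued.
      Execution::RunMicrotasks(isolate);
    }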
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index abf4f1dc6..592ecbdb6 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -45,7 +45,7 @@ enum InterruptFlag {
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7,
API_INTERRUPT = 1 << 8,
- DEOPT_MARKED_CODE = 1 << 9
+ DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
};
@@ -175,6 +175,7 @@ class Execution : public AllStatic {
bool* has_pending_exception);
static void RunMicrotasks(Isolate* isolate);
+ static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};
@@ -222,8 +223,8 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
- bool IsDeoptMarkedCode();
- void DeoptMarkedCode();
+ bool IsDeoptMarkedAllocationSites();
+ void DeoptMarkedAllocationSites();
void Continue(InterruptFlag after_what);
void RequestInterrupt(InterruptCallback callback, void* data);
@@ -281,7 +282,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index d372cf012..adc5577d9 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -107,7 +107,7 @@ void ExternalizeStringExtension::Externalize(
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
@@ -118,7 +118,7 @@ void ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsInternalizedString()) {
+ if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
isolate->heap()->external_string_table()->AddString(*string);
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index aead7be0c..0868db851 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -69,6 +69,14 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
}
+Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateUninitializedFixedArray(size),
+ FixedArray);
+}
+
+
Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
PretenureFlag pretenure) {
ASSERT(0 <= size);
@@ -81,14 +89,16 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
+ ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+ number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries),
ConstantPoolArray);
}
@@ -279,7 +289,7 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -375,9 +385,7 @@ Handle<String> Factory::NewConsString(Handle<String> left,
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
- isolate()->context()->mark_out_of_memory();
- V8::FatalProcessOutOfMemory("String concatenation result too large.");
- UNREACHABLE();
+ isolate()->ThrowInvalidStringLength();
return Handle<String>::null();
}
@@ -403,6 +411,7 @@ Handle<String> Factory::NewConsString(Handle<String> left,
ASSERT(left->IsFlat());
ASSERT(right->IsFlat());
+ STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
if (is_one_byte) {
Handle<SeqOneByteString> result = NewRawOneByteString(length);
DisallowHeapAllocation no_gc;
@@ -488,12 +497,14 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
if (!FLAG_string_slices || length < SlicedString::kMinLength) {
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result = NewRawOneByteString(length);
+ ASSERT(!result.is_null());
uint8_t* dest = result->GetChars();
DisallowHeapAllocation no_gc;
String::WriteToFlat(*str, dest, begin, end);
return result;
} else {
Handle<SeqTwoByteString> result = NewRawTwoByteString(length);
+ ASSERT(!result.is_null());
uc16* dest = result->GetChars();
DisallowHeapAllocation no_gc;
String::WriteToFlat(*str, dest, begin, end);
@@ -700,7 +711,6 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_id(Smi::FromInt(id));
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
- script->set_data(heap->undefined_value());
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_wrapper(*wrapper);
@@ -873,18 +883,17 @@ Handle<Map> Factory::CopyMap(Handle<Map> src) {
}
-Handle<Map> Factory::GetElementsTransitionMap(
- Handle<JSObject> src,
- ElementsKind elements_kind) {
- Isolate* i = isolate();
- CALL_HEAP_FUNCTION(i,
- src->GetElementsTransitionMap(i, elements_kind),
- Map);
+Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
}
-Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedArray);
+Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
+ Handle<FixedArray> array) {
+ ASSERT(isolate()->heap()->InNewSpace(*array));
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyAndTenureFixedCOWArray(*array),
+ FixedArray);
}
@@ -926,7 +935,7 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
static Handle<Map> MapForNewFunction(Isolate *isolate,
Handle<SharedFunctionInfo> function_info) {
Context *context = isolate->context()->native_context();
- int map_index = Context::FunctionMapIndex(function_info->language_mode(),
+ int map_index = Context::FunctionMapIndex(function_info->strict_mode(),
function_info->is_generator());
return Handle<Map>(Map::cast(context->get(map_index)));
}
@@ -967,7 +976,9 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
FixedArray* literals =
function_info->GetLiteralsFromOptimizedCodeMap(index);
if (literals != NULL) result->set_literals(literals);
- result->ReplaceCode(function_info->GetCodeFromOptimizedCodeMap(index));
+ Code* code = function_info->GetCodeFromOptimizedCodeMap(index);
+ ASSERT(!code->marked_for_deoptimization());
+ result->ReplaceCode(code);
return result;
}
@@ -1064,6 +1075,12 @@ Handle<Object> Factory::NewReferenceError(const char* message,
}
+Handle<Object> Factory::NewReferenceError(const char* message,
+ Handle<JSArray> args) {
+ return NewError("MakeReferenceError", message, args);
+}
+
+
Handle<Object> Factory::NewReferenceError(Handle<String> message) {
return NewError("$ReferenceError", message);
}
@@ -1113,8 +1130,8 @@ Handle<String> Factory::EmergencyNewError(const char* message,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(isolate(), i);
- Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
+ Handle<String> arg_str = Handle<String>::cast(
+ Object::GetElementNoExceptionThrown(isolate(), args, i));
SmartArrayPointer<char> arg = arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
OS::StrNCpy(v2, arg.get(), space);
@@ -1247,8 +1264,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) {
- Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
- CLASSIC_MODE);
+ Handle<JSFunction> function = NewFunctionWithoutPrototype(name, SLOPPY);
function->shared()->set_code(*code);
function->set_code(*code);
ASSERT(!function->has_initial_map());
@@ -1300,12 +1316,6 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->InternalizeString(*value), String);
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
@@ -1315,6 +1325,17 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
+Handle<JSObject> Factory::NewJSObjectWithMemento(
+ Handle<JSFunction> constructor,
+ Handle<AllocationSite> site) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(*constructor, NOT_TENURED, *site),
+ JSObject);
+}
+
+
Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
@@ -1397,18 +1418,26 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
}
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure,
- bool alloc_props) {
+Handle<JSObject> Factory::NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure,
+ bool alloc_props,
+ Handle<AllocationSite> allocation_site) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
+ isolate()->heap()->AllocateJSObjectFromMap(
+ *map,
+ pretenure,
+ alloc_props,
+ allocation_site.is_null() ? NULL : *allocation_site),
JSObject);
}
-Handle<JSArray> Factory::NewJSArray(int capacity,
- ElementsKind elements_kind,
+Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
@@ -1416,9 +1445,9 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
- 0,
+ length,
capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+ mode,
pretenure),
JSArray);
}
@@ -1426,32 +1455,28 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
+ ASSERT(length <= elements->length());
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
- elements->length(),
+ length,
pretenure),
JSArray);
}
-void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- accessor->SetCapacityAndLength(*array, capacity, length));
-}
-
-
-void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArrayBase> elements) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->SetContent(*elements));
+void Factory::NewJSArrayStorage(Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ CALL_HEAP_FUNCTION_VOID(isolate(),
+ isolate()->heap()->AllocateJSArrayStorage(*array,
+ length,
+ capacity,
+ mode));
}
@@ -1572,7 +1597,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
@@ -1580,7 +1604,6 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
start_position,
end_position,
*script,
- *stack_trace,
*stack_frames),
JSMessageObject);
}
@@ -1630,7 +1653,7 @@ Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateFunction(*isolate()->function_map(),
+ isolate()->heap()->AllocateFunction(*isolate()->sloppy_function_map(),
*function_share,
*prototype),
JSFunction);
@@ -1647,11 +1670,11 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- LanguageMode language_mode) {
+ StrictMode strict_mode) {
Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
- Handle<Map> map = (language_mode == CLASSIC_MODE)
- ? isolate()->function_without_prototype_map()
- : isolate()->strict_mode_function_without_prototype_map();
+ Handle<Map> map = strict_mode == SLOPPY
+ ? isolate()->sloppy_function_without_prototype_map()
+ : isolate()->strict_function_without_prototype_map();
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateFunction(
*map,
@@ -1663,9 +1686,8 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<String> name,
- LanguageMode language_mode) {
- Handle<JSFunction> fun =
- NewFunctionWithoutPrototypeHelper(name, language_mode);
+ StrictMode strict_mode) {
+ Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
fun->set_context(isolate()->context()->native_context());
return fun;
}
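
Much of the factory.cc churn above is mechanical fallout from the LanguageMode-to-StrictMode rename: CLASSIC_MODE becomes SLOPPY and the helpers now pick between the sloppy_* and strict_* function maps. A hedged sketch of the resulting call sites, assuming the two-value StrictMode enum (SLOPPY, STRICT) used elsewhere in this tree; the wrapper function is illustrative:

    void MakeFunctions(Factory* factory, Handle<String> name) {
      // CLASSIC_MODE is now spelled SLOPPY; the former extended mode is
      // gone, so only SLOPPY and STRICT remain.
      Handle<JSFunction> sloppy_fn =
          factory->NewFunctionWithoutPrototype(name, SLOPPY);
      Handle<JSFunction> strict_fn =
          factory->NewFunctionWithoutPrototype(name, STRICT);
      ASSERT(!sloppy_fn.is_null() && !strict_fn.is_null());
    }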
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index db25b09a9..00f20ff8b 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -44,7 +44,7 @@ class Factory {
Handle<Object> value,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate a new uninitialized fixed array.
+ // Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@@ -54,6 +54,9 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates an uninitialized fixed array. It must be filled by the caller.
+ Handle<FixedArray> NewUninitializedFixedArray(int size);
+
// Allocate a new uninitialized fixed double array.
Handle<FixedDoubleArray> NewFixedDoubleArray(
int size,
@@ -61,7 +64,8 @@ class Factory {
Handle<ConstantPoolArray> NewConstantPoolArray(
int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries);
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
@@ -225,9 +229,6 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the internalized version of the passed in string.
- Handle<String> InternalizedStringFromString(Handle<String> value);
-
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
@@ -287,11 +288,12 @@ class Factory {
Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
Handle<Map> CopyMap(Handle<Map> map);
- Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
- ElementsKind elements_kind);
-
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ // This method expects a COW array in new space, and creates a copy
+ // of it in old space.
+ Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array);
+
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
int new_length,
PretenureFlag pretenure = NOT_TENURED);
@@ -326,15 +328,20 @@ class Factory {
// runtime.
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
+ // JSObject that should have a memento pointing to the allocation site.
+ Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
+ Handle<AllocationSite> site);
// Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED,
- bool allocate_properties = true);
+ Handle<JSObject> NewJSObjectFromMap(
+ Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool allocate_properties = true,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
@@ -345,20 +352,39 @@ class Factory {
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind,
+ int length,
int capacity,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ArrayStorageAllocationMode mode = INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(
+ int capacity,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewJSArray(elements_kind, 0, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
+ }
+
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
- void SetElementsCapacityAndLength(Handle<JSArray> array,
- int capacity,
- int length);
+ Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewJSArrayWithElements(
+ elements, elements_kind, elements->length(), pretenure);
+ }
- void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+ void NewJSArrayStorage(
+ Handle<JSArray> array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
@@ -379,7 +405,7 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototype(
Handle<String> name,
- LanguageMode language_mode);
+ StrictMode strict_mode);
Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
@@ -438,6 +464,7 @@ class Factory {
Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
+ Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
Handle<Object> NewReferenceError(Handle<String> message);
Handle<Object> NewEvalError(const char* message,
@@ -528,7 +555,6 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
- Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<SeededNumberDictionary> DictionaryAtNumberPut(
@@ -582,7 +608,7 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
Handle<String> name,
- LanguageMode language_mode);
+ StrictMode strict_mode);
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
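
The reworked Factory::NewJSArray above splits the interface into a long form (elements kind, length, capacity, storage mode) plus thin convenience overloads that keep the old capacity-first signature. A hedged usage sketch based only on the declarations shown in this hunk; the wrapper function is illustrative:

    void MakeArrays(Factory* factory) {
      // Long form: explicit elements kind, length, capacity and storage mode.
      Handle<JSArray> holey = factory->NewJSArray(
          FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
      // Convenience overload: old capacity-first signature, forwarding to the
      // long form with length 0 and hole-initialized storage.
      Handle<JSArray> same = factory->NewJSArray(16);
      ASSERT(!holey.is_null() && !same.is_null());
    }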
diff --git a/deps/v8/src/feedback-slots.h b/deps/v8/src/feedback-slots.h
new file mode 100644
index 000000000..9760c652b
--- /dev/null
+++ b/deps/v8/src/feedback-slots.h
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FEEDBACK_SLOTS_H_
+#define V8_FEEDBACK_SLOTS_H_
+
+#include "v8.h"
+
+#include "isolate.h"
+
+namespace v8 {
+namespace internal {
+
+enum ComputablePhase {
+ DURING_PARSE,
+ AFTER_SCOPING
+};
+
+
+class FeedbackSlotInterface {
+ public:
+ static const int kInvalidFeedbackSlot = -1;
+
+ virtual ~FeedbackSlotInterface() {}
+
+ // When can we ask how many feedback slots are necessary?
+ virtual ComputablePhase GetComputablePhase() = 0;
+ virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0;
+ virtual void SetFirstFeedbackSlot(int slot) = 0;
+};
+
+
+class DeferredFeedbackSlotProcessor {
+ public:
+ DeferredFeedbackSlotProcessor()
+ : slot_nodes_(NULL),
+ slot_count_(0) { }
+
+ void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) {
+ if (slot->GetComputablePhase() == DURING_PARSE) {
+ // No need to add to the list
+ int count = slot->ComputeFeedbackSlotCount(zone->isolate());
+ slot->SetFirstFeedbackSlot(slot_count_);
+ slot_count_ += count;
+ } else {
+ if (slot_nodes_ == NULL) {
+ slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone);
+ }
+ slot_nodes_->Add(slot, zone);
+ }
+ }
+
+ void ProcessFeedbackSlots(Isolate* isolate) {
+ // Scope analysis must have been done.
+ if (slot_nodes_ == NULL) {
+ return;
+ }
+
+ int current_slot = slot_count_;
+ for (int i = 0; i < slot_nodes_->length(); i++) {
+ FeedbackSlotInterface* slot_interface = slot_nodes_->at(i);
+ int count = slot_interface->ComputeFeedbackSlotCount(isolate);
+ if (count > 0) {
+ slot_interface->SetFirstFeedbackSlot(current_slot);
+ current_slot += count;
+ }
+ }
+
+ slot_count_ = current_slot;
+ slot_nodes_->Clear();
+ }
+
+ int slot_count() {
+ ASSERT(slot_count_ >= 0);
+ return slot_count_;
+ }
+
+ private:
+ ZoneList<FeedbackSlotInterface*>* slot_nodes_;
+ int slot_count_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FEEDBACK_SLOTS_H_
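
feedback-slots.h is new in this drop: DeferredFeedbackSlotProcessor hands out type-feedback slot indices either immediately (nodes that can answer DURING_PARSE) or after scope analysis (AFTER_SCOPING nodes are queued and resolved by ProcessFeedbackSlots). A hedged sketch of the intended flow, using only the interface declared above; the wrapper function is illustrative:

    int CountFeedbackSlots(Zone* zone, Isolate* isolate,
                           FeedbackSlotInterface* node) {
      DeferredFeedbackSlotProcessor processor;
      // DURING_PARSE nodes get their first slot assigned right here;
      // AFTER_SCOPING nodes are only recorded for later.
      processor.add_slot_node(zone, node);
      // After scope analysis, assign slots to the deferred nodes.
      processor.ProcessFeedbackSlots(isolate);
      return processor.slot_count();
    }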
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index c0eaf16da..b93d03b59 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -167,10 +167,7 @@ struct MaybeBoolFlag {
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es5_readonly, true,
- "activate correct semantics for inheriting readonliness")
-DEFINE_bool(es52_globals, true,
- "activate new semantics for global var declarations")
+DEFINE_bool(es_staging, false, "enable upcoming ES6+ features")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -178,12 +175,9 @@ DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
DEFINE_bool(harmony_symbols, false,
"enable harmony symbols (a.k.a. private names)")
-DEFINE_bool(harmony_promises, false, "enable harmony promises")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
- "enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony_observation, false,
- "enable harmony object observation (implies harmony collections")
+ "enable harmony collections (sets, maps)")
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
@@ -192,22 +186,21 @@ DEFINE_bool(harmony_strings, false, "enable harmony string")
DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
+
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_symbols)
-DEFINE_implication(harmony, harmony_promises)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
-DEFINE_implication(harmony, harmony_maths)
-DEFINE_implication(harmony_promises, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_observation, harmony_collections)
+
+DEFINE_implication(harmony, es_staging)
+DEFINE_implication(es_staging, harmony_maths)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -234,7 +227,6 @@ DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
-DEFINE_bool(smi_x64_store_opt, false, "optimized stores of smi on x64")
// Flags for optimization types.
DEFINE_bool(optimize_for_size, false,
@@ -248,13 +240,15 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "*", "optimization filter")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
+DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
+DEFINE_bool(use_write_barrier_elimination, true,
+ "eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -275,6 +269,7 @@ DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_load_elimination, false, "trace load elimination")
+DEFINE_bool(trace_store_elimination, false, "trace store elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
@@ -301,6 +296,7 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
+DEFINE_bool(trace_bce, false, "trace array bounds check elimination")
DEFINE_bool(array_bounds_checks_hoisting, false,
"perform array bounds checks hoisting")
DEFINE_bool(array_index_dehoisting, true,
@@ -309,6 +305,7 @@ DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
DEFINE_bool(load_elimination, true, "use load elimination")
DEFINE_bool(check_elimination, true, "use check elimination")
+DEFINE_bool(store_elimination, false, "use store elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
@@ -353,6 +350,9 @@ DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_int(typed_array_max_size_in_heap, 64,
+ "threshold for in-heap typed array")
+
// Profiler flags.
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
// 0x1800 fits in the immediate field of an ARM instruction.
@@ -396,6 +396,8 @@ DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
+DEFINE_bool(force_long_branches, false,
+ "force all emitted branches to be in long mode (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -416,10 +418,6 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
-// checks.cc
-DEFINE_bool(stack_trace_on_abort, true,
- "print a stack trace if an assertion failure occurs")
-
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -470,7 +468,7 @@ DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
+DEFINE_bool(hard_abort, true, "abort by crashing")
// execution.cc
// Slightly less than 1MB on 64-bit, since Windows' default stack size for
@@ -535,6 +533,7 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
+DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -571,6 +570,8 @@ DEFINE_bool(cleanup_code_caches_at_gc, true,
DEFINE_bool(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
+DEFINE_bool(zap_code_space, true,
+ "Zap free memory in code space with 0xCC while sweeping.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -582,19 +583,36 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc and simulator-mips.cc
+// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
+DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#ifdef V8_TARGET_ARCH_ARM64
+DEFINE_int(sim_stack_alignment, 16,
+ "Stack alignment in bytes in simulator. This must be a power of two "
+ "and it must be at least 16. 16 is default.")
+#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
+#endif
+DEFINE_int(sim_stack_size, 2 * MB / KB,
+ "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
+DEFINE_bool(log_regs_modified, true,
+ "When logging register values, only print modified registers.")
+DEFINE_bool(log_colour, true,
+ "When logging, try to use coloured output.")
+DEFINE_bool(ignore_asm_unimplemented_break, false,
+ "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
+DEFINE_bool(trace_sim_messages, false,
+ "Trace simulator debug messages. Implied by --trace-sim.")
// isolate.cc
+DEFINE_bool(stack_trace_on_illegal, false,
+ "print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(trace_exception, false,
- "print stack trace when throwing exceptions")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -633,7 +651,6 @@ DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
DEFINE_bool(predictable, false, "enable predictable mode")
-DEFINE_neg_implication(predictable, randomize_hashes)
DEFINE_neg_implication(predictable, concurrent_recompilation)
DEFINE_neg_implication(predictable, concurrent_osr)
DEFINE_neg_implication(predictable, concurrent_sweeping)
@@ -799,6 +816,11 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_string(log_instruction_file, "arm64_inst.csv",
+ "AArch64 instruction statistics log file.")
+DEFINE_int(log_instruction_period, 1 << 22,
+ "AArch64 instruction statistics logging period.")
DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
@@ -806,6 +828,9 @@ DEFINE_bool(redirect_code_traces, false,
DEFINE_string(redirect_code_traces_to, NULL,
"output deopt information and disassembly into the given file")
+DEFINE_bool(hydrogen_track_positions, false,
+ "track source code positions when building IR")
+
//
// Disassembler only flags
//
@@ -838,8 +863,6 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
-DEFINE_bool(emit_opt_code_positions, false,
- "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -848,7 +871,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, hydrogen_track_positions)
DEFINE_implication(sodium, code_comments)
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
@@ -871,7 +894,7 @@ DEFINE_implication(print_all_code, trace_codegen)
#define FLAG FLAG_READONLY
// assembler-arm.h
-DEFINE_bool(enable_ool_constant_pool, false,
+DEFINE_bool(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
"enable use of out-of-line constant pools (ARM only)")
// Cleanup...
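
Editor's note on the ARM64 simulator flags above: the new sim_stack_alignment help text requires the value to be a power of two and at least 16. A minimal, self-contained sketch of checking that constraint (not part of V8's flag machinery; all names here are hypothetical):

    #include <cstdio>

    // Hypothetical helper mirroring the constraint stated in the help text
    // ("must be a power of two and it must be at least 16").
    static bool IsValidSimStackAlignment(int alignment) {
      // A power of two has exactly one bit set, so x & (x - 1) is zero.
      return alignment >= 16 && (alignment & (alignment - 1)) == 0;
    }

    int main() {
      std::printf("%d\n", IsValidSimStackAlignment(16));  // 1
      std::printf("%d\n", IsValidSimStackAlignment(24));  // 0: not a power of two
      std::printf("%d\n", IsValidSimStackAlignment(8));   // 0: below the minimum
    }
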
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 2b15bfffa..aacb5664a 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -36,6 +36,8 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/frames-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -199,6 +201,11 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
}
+inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
+ return fp + StandardFrameConstants::kConstantPoolOffset;
+}
+
+
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
Object* marker =
Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 3b55c276c..0c47de910 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -531,6 +531,10 @@ void ExitFrame::ComputeCallerState(State* state) const {
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ state->constant_pool_address = reinterpret_cast<Address*>(
+ fp() + ExitFrameConstants::kConstantPoolOffset);
+ }
}
@@ -574,6 +578,8 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(fp + ExitFrameConstants::kConstantPoolOffset);
}
@@ -610,6 +616,8 @@ void StandardFrame::ComputeCallerState(State* state) const {
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->constant_pool_address =
+ reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index e5b6d3dd0..17f0cb35a 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -35,7 +35,11 @@
namespace v8 {
namespace internal {
+#if V8_TARGET_ARCH_ARM64
+typedef uint64_t RegList;
+#else
typedef uint32_t RegList;
+#endif
// Get the number of registers in a given register list.
int NumRegs(RegList list);
@@ -221,10 +225,12 @@ class StackFrame BASE_EMBEDDED {
};
struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL) { }
+ State() : sp(NULL), fp(NULL), pc_address(NULL),
+ constant_pool_address(NULL) { }
Address sp;
Address fp;
Address* pc_address;
+ Address* constant_pool_address;
};
// Copy constructor; it breaks the connection to host iterator
@@ -266,6 +272,11 @@ class StackFrame BASE_EMBEDDED {
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
+ Address constant_pool() const { return *constant_pool_address(); }
+ void set_constant_pool(ConstantPoolArray* constant_pool) {
+ *constant_pool_address() = reinterpret_cast<Address>(constant_pool);
+ }
+
virtual void SetCallerFp(Address caller_fp) = 0;
// Manually changes value of fp in this object.
@@ -273,6 +284,10 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address() const { return state_.pc_address; }
+ Address* constant_pool_address() const {
+ return state_.constant_pool_address;
+ }
+
// Get the id of this stack frame.
Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
@@ -492,6 +507,10 @@ class StandardFrame: public StackFrame {
// by the provided frame pointer.
static inline Address ComputePCAddress(Address fp);
+ // Computes the address of the constant pool field in the standard
+ // frame given by the provided frame pointer.
+ static inline Address ComputeConstantPoolAddress(Address fp);
+
// Iterate over expression stack including stack handlers, locals,
// and parts of the fixed part including context and code fields.
void IterateExpressions(ObjectVisitor* v) const;
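
The frames changes above thread a constant_pool_address through StackFrame::State and add ComputeConstantPoolAddress, which reads a field at a fixed offset from the frame pointer, just as ComputePCAddress does for the return address. A minimal sketch of that addressing idea follows; the offset is a placeholder, since the real StandardFrameConstants live in the per-architecture frames-*.h headers not shown here:

    #include <cstdint>

    typedef uint8_t* Address;

    // Hypothetical stand-in for the real per-architecture frame constants.
    struct StandardFrameConstantsSketch {
      static const int kConstantPoolOffset = -8;  // placeholder, not the real offset
    };

    // Same shape as the ComputeConstantPoolAddress helper added in frames-inl.h:
    // the constant pool pointer sits at a fixed offset from the frame pointer.
    inline Address ComputeConstantPoolAddressSketch(Address fp) {
      return fp + StandardFrameConstantsSketch::kConstantPoolOffset;
    }
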
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index e14afefda..fa9ecf41b 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -345,7 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -387,6 +386,18 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
+void FullCodeGenerator::InitializeFeedbackVector() {
+ int length = info_->function()->slot_count();
+ feedback_vector_ = isolate()->factory()->NewFixedArray(length, TENURED);
+ Handle<Object> sentinel = TypeFeedbackInfo::UninitializedSentinel(isolate());
+ // Ensure that it's safe to set without using a write barrier.
+ ASSERT_EQ(isolate()->heap()->uninitialized_symbol(), *sentinel);
+ for (int i = 0; i < length; i++) {
+ feedback_vector_->set(i, *sentinel, SKIP_WRITE_BARRIER);
+ }
+}
+
+
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -405,6 +416,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
+ info->set_feedback_vector(*FeedbackVector());
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}
@@ -425,21 +437,6 @@ void FullCodeGenerator::Initialize() {
}
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
- if (type_feedback_cells_.is_empty()) return;
- int length = type_feedback_cells_.length();
- int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
- Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
- isolate()->factory()->NewFixedArray(array_size, TENURED));
- for (int i = 0; i < length; i++) {
- cache->SetAstId(i, type_feedback_cells_[i].ast_id);
- cache->SetCell(i, *type_feedback_cells_[i].cell);
- }
- TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
- *cache);
-}
-
-
void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
@@ -449,13 +446,13 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
TypeFeedbackId id) {
ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
- CallIC(ic, contextual_mode, id);
+ CallIC(ic, id);
}
-void FullCodeGenerator::CallStoreIC(ContextualMode mode, TypeFeedbackId id) {
+void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
- CallIC(ic, mode, id);
+ CallIC(ic, id);
}
@@ -490,13 +487,6 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
}
-void FullCodeGenerator::RecordTypeFeedbackCell(
- TypeFeedbackId id, Handle<Cell> cell) {
- TypeFeedbackCellEntry entry = { id, cell };
- type_feedback_cells_.Add(entry, zone());
-}
-
-
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
@@ -634,7 +624,7 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
ASSERT(scope->interface()->Index() >= 0);
__ Push(Smi::FromInt(scope->interface()->Index()));
__ Push(scope->GetScopeInfo());
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
@@ -774,7 +764,7 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
ASSERT(interface->Index() >= 0);
__ Push(Smi::FromInt(interface->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -825,10 +815,10 @@ void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
int FullCodeGenerator::DeclareGlobalsFlags() {
- ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
+ ASSERT(DeclareGlobalsStrictMode::is_valid(strict_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ DeclareGlobalsStrictMode::encode(strict_mode());
}
@@ -893,7 +883,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
}
}
#else
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, expr->position());
#endif
}
@@ -918,7 +908,6 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
const FullCodeGenerator::InlineFunctionGenerator
FullCodeGenerator::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -1102,7 +1091,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
{ Comment cmnt(masm_, "[ Extend block context");
__ Push(scope_->GetScopeInfo());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushBlockContext, 2);
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1134,7 +1123,7 @@ void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
__ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
__ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
@@ -1273,7 +1262,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
VisitForStackValue(stmt->expression());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
+ __ CallRuntime(Runtime::kHiddenPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
Scope* saved_scope = scope();
@@ -1426,7 +1415,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Push(stmt->variable()->name());
__ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
+ __ CallRuntime(Runtime::kHiddenPushCatchContext, 3);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1490,7 +1479,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// rethrow the exception if it returns.
__ Call(&finally_entry);
__ Push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
+ __ CallRuntime(Runtime::kHiddenReThrow, 1);
// Finally block implementation.
__ bind(&finally_entry);
@@ -1616,7 +1605,7 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
// Never returns here.
}
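
The full-codegen changes above replace the per-call-site list of feedback Cells with a single TENURED fixed array, the feedback vector: every slot is pre-filled with the uninitialized sentinel (so no write barrier is needed) and call sites later store their feedback at a slot index. A rough sketch of the same slot-based pattern with plain C++ containers, not the real Handle<FixedArray> code:

    #include <cassert>
    #include <vector>

    // Hypothetical sentinel standing in for the uninitialized_symbol.
    struct Sentinel {} kUninitializedSentinel;

    struct FeedbackVectorSketch {
      explicit FeedbackVectorSketch(int slot_count)
          : slots_(slot_count, static_cast<void*>(&kUninitializedSentinel)) {}

      // Mirrors StoreFeedbackVectorSlot: a call site writes its feedback
      // object at the slot index handed out when the AST was numbered.
      void Store(int slot, void* object) {
        assert(slot >= 0 && slot < static_cast<int>(slots_.size()));
        slots_[slot] = object;
      }

      std::vector<void*> slots_;
    };
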
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index d52f3c410..0d0a6ffed 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -96,9 +96,6 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
- type_feedback_cells_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count() : 0,
- info->zone()),
ic_total_count_(0) {
Initialize();
}
@@ -130,6 +127,9 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
+#elif V8_TARGET_ARCH_ARM64
+// TODO(all): Copied ARM value. Check this is sensible for ARM64.
+ static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
@@ -434,9 +434,15 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Cache cell support. This associates AST ids with global property cells
- // that will be cleared during GC and collected by the type-feedback oracle.
- void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);
+ // Feedback slot support. The feedback vector will be cleared during gc and
+ // collected by the type-feedback oracle.
+ Handle<FixedArray> FeedbackVector() {
+ return feedback_vector_;
+ }
+ void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
+ feedback_vector_->set(slot, *object);
+ }
+ void InitializeFeedbackVector();
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -491,7 +497,6 @@ class FullCodeGenerator: public AstVisitor {
#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
void Emit##name(CallRuntime* expr);
INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
- INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
// Platform-specific code for resuming generators.
@@ -552,6 +557,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);
+ // Helper functions to EmitVariableAssignment
+ void EmitStoreToStackLocalOrContextSlot(Variable* var,
+ MemOperand location);
+ void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode);
+
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -562,13 +572,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);
void CallIC(Handle<Code> code,
- ContextualMode mode = NOT_CONTEXTUAL,
TypeFeedbackId id = TypeFeedbackId::None());
void CallLoadIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallStoreIC(ContextualMode mode,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -598,11 +606,7 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
- StrictModeFlag strict_mode() {
- return is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
- LanguageMode language_mode() { return function()->language_mode(); }
+ StrictMode strict_mode() { return function()->strict_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@@ -635,7 +639,6 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateTypeFeedbackCells(Handle<Code> code);
Handle<FixedArray> handler_table() { return handler_table_; }
@@ -650,12 +653,6 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};
- struct TypeFeedbackCellEntry {
- TypeFeedbackId ast_id;
- Handle<Cell> cell;
- };
-
-
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -845,9 +842,9 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
- ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
+ Handle<FixedArray> feedback_vector_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 5409a4e18..441113b7d 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -83,11 +83,14 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
return MakeNameFromStackHelper(pos + 1, prev);
} else {
if (prev->length() > 0) {
+ Handle<String> name = names_stack_.at(pos).name;
+ if (prev->length() + name->length() + 1 > String::kMaxLength) return prev;
Factory* factory = isolate()->factory();
- Handle<String> curr = factory->NewConsString(
- factory->dot_string(), names_stack_.at(pos).name);
- return MakeNameFromStackHelper(pos + 1,
- factory->NewConsString(prev, curr));
+ Handle<String> curr = factory->NewConsString(factory->dot_string(), name);
+ CHECK_NOT_EMPTY_HANDLE(isolate(), curr);
+ curr = factory->NewConsString(prev, curr);
+ CHECK_NOT_EMPTY_HANDLE(isolate(), curr);
+ return MakeNameFromStackHelper(pos + 1, curr);
} else {
return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
}
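
The inferrer change above guards against overly long names: if prev plus the dot plus the next segment would exceed String::kMaxLength, it returns the prefix it already has instead of attempting the concatenation. A standalone sketch of that guard, assuming a small illustrative cap:

    #include <string>

    // Hypothetical cap; the real limit is String::kMaxLength.
    static const size_t kMaxLengthSketch = 64;

    // Joins prev + "." + name, but bails out with prev when the result
    // would exceed the cap, mirroring the early return added above.
    std::string JoinNameSketch(const std::string& prev, const std::string& name) {
      if (prev.size() + name.size() + 1 > kMaxLengthSketch) return prev;
      return prev + "." + name;
    }
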
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h
index f57e77860..41953ffed 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/func-name-inferrer.h
@@ -28,9 +28,13 @@
#ifndef V8_FUNC_NAME_INFERRER_H_
#define V8_FUNC_NAME_INFERRER_H_
+#include "handles.h"
+#include "zone.h"
+
namespace v8 {
namespace internal {
+class FunctionLiteral;
class Isolate;
// FuncNameInferrer is a stateful class that is used to perform name
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 09449791f..e06f79482 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -235,10 +235,12 @@ class GlobalHandles::Node {
weak_callback_ = weak_callback;
}
- void ClearWeakness() {
+ void* ClearWeakness() {
ASSERT(state() != FREE);
+ void* p = parameter();
set_state(NORMAL);
set_parameter(NULL);
+ return p;
}
bool PostGarbageCollectionProcessing(Isolate* isolate) {
@@ -271,7 +273,7 @@ class GlobalHandles::Node {
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
- ASSERT(state() != NEAR_DEATH);
+ CHECK(state() != NEAR_DEATH);
return true;
}
@@ -502,8 +504,8 @@ void GlobalHandles::MakeWeak(Object** location,
}
-void GlobalHandles::ClearWeakness(Object** location) {
- Node::FromLocation(location)->ClearWeakness();
+void* GlobalHandles::ClearWeakness(Object** location) {
+ return Node::FromLocation(location)->ClearWeakness();
}
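
ClearWeakness now returns the parameter that was registered when the handle was made weak, letting the caller reclaim whatever it stored there. A minimal sketch of the pattern with hypothetical names, not the real GlobalHandles::Node:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical node: stores an opaque parameter alongside its weak state.
    struct NodeSketch {
      void* parameter = nullptr;
      bool weak = false;

      // Returns the old parameter so the embedder can free or reuse it,
      // matching the void* return type added to ClearWeakness above.
      void* ClearWeakness() {
        void* p = parameter;
        weak = false;
        parameter = nullptr;
        return p;
      }
    };

    int main() {
      NodeSketch node;
      node.weak = true;
      node.parameter = std::malloc(16);
      std::free(node.ClearWeakness());  // caller reclaims its data
      std::printf("weak=%d\n", node.weak);
    }
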
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index a40645199..13fc111d8 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -161,7 +161,7 @@ class GlobalHandles {
}
// Clear the weakness of a global handle.
- static void ClearWeakness(Object** location);
+ static void* ClearWeakness(Object** location);
// Clear the weakness of a global handle.
static void MarkIndependent(Object** location);
@@ -340,6 +340,7 @@ class EternalHandles {
enum SingletonHandle {
I18N_TEMPLATE_ONE,
I18N_TEMPLATE_TWO,
+ DATE_CACHE_VERSION,
NUMBER_OF_SINGLETON_HANDLES
};
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index b9437f2ac..db666d804 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -71,6 +71,10 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_ARM64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
@@ -78,7 +82,7 @@ namespace internal {
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error Host architecture was not detected as supported by v8
+#error "Host architecture was not detected as supported by v8"
#endif
#if defined(__ARM_ARCH_7A__) || \
@@ -95,11 +99,13 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_ARM64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@@ -119,6 +125,9 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
+#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -127,6 +136,9 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
+#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
+#define USE_SIMULATOR 1
+#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -142,12 +154,17 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
+// Determine whether the architecture uses an out-of-line constant pool.
+#define V8_OOL_CONSTANT_POOL 0
+
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -376,6 +393,12 @@ F FUNCTION_CAST(Address addr) {
#define DISABLE_ASAN
#endif
+#if V8_CC_GNU
+#define V8_IMMEDIATE_CRASH() __builtin_trap()
+#else
+#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
+#endif
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@@ -387,34 +410,9 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.
-// The different language modes that V8 implements. ES5 defines two language
-// modes: an unrestricted mode respectively a strict mode which are indicated by
-// CLASSIC_MODE respectively STRICT_MODE in the enum. The harmony spec drafts
-// for the next ES standard specify a new third mode which is called 'extended
-// mode'. The extended mode is only available if the harmony flag is set. It is
-// based on the 'strict mode' and adds new functionality to it. This means that
-// most of the semantics of these two modes coincide.
-//
-// In the current draft the term 'base code' is used to refer to code that is
-// neither in strict nor extended mode. However, the more distinguishing term
-// 'classic mode' is used in V8 instead to avoid mix-ups.
-
-enum LanguageMode {
- CLASSIC_MODE,
- STRICT_MODE,
- EXTENDED_MODE
-};
-
-
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-//
-// This flag is used in the backend to represent the language mode. So far
-// there is no semantic difference between the strict and the extended mode in
-// the backend, so both modes are represented by the kStrictMode value.
-enum StrictModeFlag {
- kNonStrictMode,
- kStrictMode
-};
+
+enum StrictMode { SLOPPY, STRICT };
} } // namespace v8::internal
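
Among the globals.h changes, V8_IMMEDIATE_CRASH uses __builtin_trap on GCC-compatible compilers and otherwise calls through a null function pointer, which faults immediately. A tiny sketch of the same two strategies outside the V8 macro set (the fallback path is never meant to return):

    // Sketch of the two strategies behind an "immediate crash" macro:
    // a compiler intrinsic where available, a guaranteed fault otherwise.
    #if defined(__GNUC__) || defined(__clang__)
    #define IMMEDIATE_CRASH_SKETCH() __builtin_trap()
    #else
    #define IMMEDIATE_CRASH_SKETCH() ((void (*)())0)()  // calls address 0
    #endif
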
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 22bbd7cd7..a25b4a226 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -97,7 +97,8 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (!AllowHandleDereference::IsAllowed()) return false;
if (mode == INCLUDE_DEFERRED_CHECK &&
!AllowDeferredHandleDereference::IsAllowed()) {
- // Accessing maps and internalized strings is safe.
+ // Accessing cells, maps and internalized strings is safe.
+ if (heap_object->IsCell()) return true;
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return !heap->isolate()->IsDeferredHandle(handle);
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 830eb0960..398a68265 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -509,7 +509,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
- isolate->context()->native_context()->arguments_boilerplate(),
+ isolate->context()->native_context()->sloppy_arguments_boilerplate(),
isolate);
Handle<JSFunction> arguments_function = Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()),
@@ -537,10 +537,10 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
// Check access rights if required.
if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*current,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(current,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(current, v8::ACCESS_KEYS);
if (isolate->has_scheduled_exception()) {
isolate->PromoteScheduledException();
*threw = true;
@@ -712,35 +712,12 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
return ReduceFixedArrayTo(storage, enum_size);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary());
-
- int length = dictionary->NumberOfElements();
+ int length = dictionary->NumberOfEnumElements();
if (length == 0) {
return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
}
-
- // The enumeration array is generated by allocating an array big enough to
- // hold all properties that have been seen, whether they are are deleted or
- // not. Subsequently all visible properties are added to the array. If some
- // properties were not visible, the array is trimmed so it only contains
- // visible properties. This improves over adding elements and sorting by
- // index by having linear complexity rather than n*log(n).
-
- // By comparing the monotonous NextEnumerationIndex to the NumberOfElements,
- // we can predict the number of holes in the final array. If there will be
- // more than 50% holes, regenerate the enumeration indices to reduce the
- // number of holes to a minimum. This avoids allocating a large array if
- // many properties were added but subsequently deleted.
- int next_enumeration = dictionary->NextEnumerationIndex();
- if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
- NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
- next_enumeration = dictionary->NextEnumerationIndex();
- }
-
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(next_enumeration);
-
- storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
- ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_SHOW));
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ dictionary->CopyEnumKeysTo(*storage);
return storage;
}
}
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index 2cedebaae..d37d87538 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -51,7 +51,7 @@ function ArrayFind(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
@@ -86,7 +86,7 @@ function ArrayFindIndex(predicate /* thisArg */) { // length == 1
if (IS_NULL_OR_UNDEFINED(thisArg)) {
thisArg = %GetDefaultReceiver(predicate) || thisArg;
- } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsSloppyModeFunction(predicate)) {
thisArg = ToObject(thisArg);
}
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js
index d57a10404..298fa58cb 100644
--- a/deps/v8/src/harmony-math.js
+++ b/deps/v8/src/harmony-math.js
@@ -59,8 +59,7 @@ function MathSinh(x) {
// ES6 draft 09-27-13, section 20.2.2.12.
function MathCosh(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- // Idempotent for NaN and +/-Infinity.
- if (!NUMBER_IS_FINITE(x)) return x;
+ if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
return (MathExp(x) + MathExp(-x)) / 2;
}
@@ -110,19 +109,19 @@ function MathAtanh(x) {
}
-//ES6 draft 09-27-13, section 20.2.2.21.
+// ES6 draft 09-27-13, section 20.2.2.21.
function MathLog10(x) {
return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
}
-//ES6 draft 09-27-13, section 20.2.2.22.
+// ES6 draft 09-27-13, section 20.2.2.22.
function MathLog2(x) {
return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
}
-//ES6 draft 09-27-13, section 20.2.2.17.
+// ES6 draft 09-27-13, section 20.2.2.17.
function MathHypot(x, y) { // Function length is 2.
// We may want to introduce fast paths for two arguments and when
// normalization to avoid overflow is not necessary. For now, we
@@ -155,6 +154,93 @@ function MathHypot(x, y) { // Function length is 2.
}
+// ES6 draft 09-27-13, section 20.2.2.16.
+function MathFround(x) {
+ return %Math_fround(TO_NUMBER_INLINE(x));
+}
+
+
+function MathClz32(x) {
+ x = ToUint32(TO_NUMBER_INLINE(x));
+ if (x == 0) return 32;
+ var result = 0;
+ // Binary search.
+ if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
+ if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
+ if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
+ if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
+ if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
+ return result;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.9.
+// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
+// Using initial approximation adapted from Kahan's cbrt and 4 iterations
+// of Newton's method.
+function MathCbrt(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
+ return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
+}
+
+macro NEWTON_ITERATION_CBRT(x, approx)
+ (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
+endmacro
+
+function CubeRoot(x) {
+ var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
+ var approx = %_ConstructDouble(approx_hi, 0);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ approx = NEWTON_ITERATION_CBRT(x, approx);
+ return NEWTON_ITERATION_CBRT(x, approx);
+}
+
+
+
+// ES6 draft 09-27-13, section 20.2.2.14.
+// Use Taylor series to approximate.
+// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ...
+// == x/1! + x^2/2! + x^3/3! + ...
+// The closer x is to 0, the fewer terms are required.
+function MathExpm1(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 2E-7) {
+ return x * (1 + x * (1/2));
+ } else if (xabs < 6E-5) {
+ return x * (1 + x * (1/2 + x * (1/6)));
+ } else if (xabs < 2E-2) {
+ return x * (1 + x * (1/2 + x * (1/6 +
+ x * (1/24 + x * (1/120 + x * (1/720))))));
+ } else { // Use regular exp if not close enough to 0.
+ return MathExp(x) - 1;
+ }
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.20.
+// Use Taylor series to approximate. With y = x + 1;
+// log(y) at 1 == log(1) + log'(1)(y-1)/1! + log''(1)(y-1)^2/2! + ...
+// == 0 + x - x^2/2 + x^3/3 ...
+// The closer x is to 0, the fewer terms are required.
+function MathLog1p(x) {
+ if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ var xabs = MathAbs(x);
+ if (xabs < 1E-7) {
+ return x * (1 - x * (1/2));
+ } else if (xabs < 3E-5) {
+ return x * (1 - x * (1/2 - x * (1/3)));
+ } else if (xabs < 7E-3) {
+ return x * (1 - x * (1/2 - x * (1/3 - x * (1/4 -
+ x * (1/5 - x * (1/6 - x * (1/7)))))));
+ } else { // Use regular log if not close enough to 0.
+ return MathLog(1 + x);
+ }
+}
+
+
function ExtendMath() {
%CheckIsBootstrapping();
@@ -170,8 +256,14 @@ function ExtendMath() {
"atanh", MathAtanh,
"log10", MathLog10,
"log2", MathLog2,
- "hypot", MathHypot
+ "hypot", MathHypot,
+ "fround", MathFround,
+ "clz32", MathClz32,
+ "cbrt", MathCbrt,
+ "log1p", MathLog1p,
+ "expm1", MathExpm1
));
}
+
ExtendMath();
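
MathExpm1 and MathLog1p above use short Taylor expansions near zero because computing exp(x) - 1 or log(1 + x) directly loses most significant digits when |x| is tiny (the subtraction cancels almost everything). A small C++ check of that effect, independent of the JavaScript code:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1e-12;
      // Naive form: exp(x) is 1.000000000001..., and subtracting 1 keeps
      // only a handful of correct digits.
      double naive = std::exp(x) - 1.0;
      // Series form used above for tiny |x|: x + x^2/2 (+ higher terms).
      double series = x * (1.0 + x * 0.5);
      // Library reference, computed without the cancellation.
      double reference = std::expm1(x);
      std::printf("naive     = %.17g\n", naive);
      std::printf("series    = %.17g\n", series);
      std::printf("reference = %.17g\n", reference);
    }
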
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 35bad4af3..063cf30ff 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -137,8 +137,8 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
uint32_t hash_field) {
- if (str.length() > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x2);
+ if (str.length() > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
// Compute map and object size.
Map* map = ascii_internalized_string_map();
@@ -170,8 +170,8 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
uint32_t hash_field) {
- if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x3);
+ if (str.length() > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
// Compute map and object size.
Map* map = internalized_string_map();
@@ -223,7 +223,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
- !disallow_allocation_failure_ &&
+ AllowAllocationFailure::IsAllowed(isolate_) &&
Heap::allocation_timeout_-- <= 0) {
return Failure::RetryAfterGC(space);
}
@@ -490,7 +490,8 @@ void Heap::ScavengePointer(HeapObject** p) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode) {
Heap* heap = object->GetHeap();
ASSERT(heap->InFromSpace(object));
@@ -518,7 +519,7 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
if (!memento->IsValid()) return;
if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite());
+ heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
}
}
@@ -541,7 +542,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- UpdateAllocationSiteFeedback(object);
+ UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
@@ -640,35 +641,26 @@ Isolate* Heap::isolate() {
// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
do { \
GC_GREEDY_CHECK(ISOLATE); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
Object* __object__ = NULL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
(ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
allocation_space(), \
"allocation failure"); \
__maybe_object__ = FUNCTION_CALL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
(ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
(ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
- AlwaysAllocateScope __scope__; \
+ AlwaysAllocateScope __scope__(ISOLATE); \
__maybe_object__ = FUNCTION_CALL; \
} \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
- if (__maybe_object__->IsOutOfMemory()) { \
- OOM; \
- } \
if (__maybe_object__->IsRetryAfterGC()) { \
/* TODO(1181417): Fix this. */ \
v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);\
@@ -682,8 +674,7 @@ Isolate* Heap::isolate() {
ISOLATE, \
FUNCTION_CALL, \
RETURN_VALUE, \
- RETURN_EMPTY, \
- v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY", true))
+ RETURN_EMPTY)
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
CALL_AND_RETRY_OR_DIE(ISOLATE, \
@@ -700,7 +691,6 @@ Isolate* Heap::isolate() {
CALL_AND_RETRY(ISOLATE, \
FUNCTION_CALL, \
return __object__, \
- return __maybe_object__, \
return __maybe_object__)
@@ -777,21 +767,20 @@ void Heap::CompletelyClearInstanceofCache() {
}
-AlwaysAllocateScope::AlwaysAllocateScope() {
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+ : heap_(isolate->heap()), daf_(isolate) {
// We shouldn't hit any nested scopes, because that requires
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- Isolate* isolate = Isolate::Current();
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
- isolate->heap()->always_allocate_scope_depth_++;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- Isolate* isolate = Isolate::Current();
- isolate->heap()->always_allocate_scope_depth_--;
- ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ heap_->always_allocate_scope_depth_--;
+ ASSERT(heap_->always_allocate_scope_depth_ == 0);
}
@@ -809,6 +798,21 @@ NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
#endif
+GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
+ heap_->gc_callbacks_depth_++;
+}
+
+
+GCCallbacksScope::~GCCallbacksScope() {
+ heap_->gc_callbacks_depth_--;
+}
+
+
+bool GCCallbacksScope::CheckReenter() {
+ return heap_->gc_callbacks_depth_ == 1;
+}
+
+
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -820,25 +824,15 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
-double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(heap_->SizeOfObjects())) / MB;
-}
-
-
-DisallowAllocationFailure::DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- old_state_ = isolate->heap()->disallow_allocation_failure_;
- isolate->heap()->disallow_allocation_failure_ = true;
-#endif
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ CHECK((*current)->IsSmi());
+ }
}
-DisallowAllocationFailure::~DisallowAllocationFailure() {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
- isolate->heap()->disallow_allocation_failure_ = old_state_;
-#endif
+double GCTracer::SizeOfHeapObjects() {
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
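
AlwaysAllocateScope now takes the Isolate explicitly, and the new GCCallbacksScope follows the same RAII shape: increment a depth counter in the constructor, decrement in the destructor, and let CheckReenter report whether this is the outermost scope so GC callbacks are not re-entered when a callback itself triggers another GC. A generic sketch of that depth-counting scope with hypothetical names:

    // Minimal sketch of the pattern behind GCCallbacksScope.
    struct HeapSketch {
      int gc_callbacks_depth = 0;
    };

    class CallbacksScopeSketch {
     public:
      explicit CallbacksScopeSketch(HeapSketch* heap) : heap_(heap) {
        heap_->gc_callbacks_depth++;
      }
      ~CallbacksScopeSketch() { heap_->gc_callbacks_depth--; }

      // True only for the outermost scope.
      bool CheckReenter() const { return heap_->gc_callbacks_depth == 1; }

     private:
      HeapSketch* heap_;
    };
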
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 7413b6e68..1dc111321 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -168,7 +168,10 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
- ids_->MoveObject(from, to, size);
+ bool known_object = ids_->MoveObject(from, to, size);
+ if (!known_object && !allocation_tracker_.is_empty()) {
+ allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
+ }
}
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index ccfbfb8d0..332d0dbf6 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -34,6 +34,7 @@
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
+#include "v8conversions.h"
namespace v8 {
namespace internal {
@@ -72,14 +73,16 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
- int self_size)
+ size_t self_size,
+ unsigned trace_node_id)
: type_(type),
children_count_(0),
children_index_(-1),
self_size_(self_size),
- id_(id),
snapshot_(snapshot),
- name_(name) { }
+ name_(name),
+ id_(id),
+ trace_node_id_(trace_node_id) { }
void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
@@ -103,7 +106,7 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
+ OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -188,12 +191,12 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapEntrySize = 28;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapEntrySize = 40;
};
} // namespace
@@ -242,6 +245,7 @@ HeapEntry* HeapSnapshot::AddRootEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"",
HeapObjectsMap::kInternalRootObjectId,
+ 0,
0);
root_index_ = entry->index();
ASSERT(root_index_ == 0);
@@ -254,6 +258,7 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry() {
HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
"(GC roots)",
HeapObjectsMap::kGcRootsObjectId,
+ 0,
0);
gc_roots_index_ = entry->index();
return entry;
@@ -267,6 +272,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry::kSynthetic,
VisitorSynchronization::kTagNames[tag],
HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0,
0);
gc_subroot_indexes_[tag] = entry->index();
return entry;
@@ -276,8 +282,9 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
+ size_t size,
+ unsigned trace_node_id) {
+ HeapEntry entry(this, type, name, id, size, trace_node_id);
entries_.Add(entry);
return &entries_.last();
}
@@ -389,10 +396,10 @@ HeapObjectsMap::HeapObjectsMap(Heap* heap)
}
-void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
+bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
- if (from == to) return;
+ if (from == to) return false;
void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
if (from_value == NULL) {
// It may occur that some untracked object moves to an address X and there
@@ -433,6 +440,7 @@ void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
+ return from_value != NULL;
}
@@ -899,17 +907,88 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- heap_object_map_->FindOrAddEntry(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
+ return AddEntry(object->address(), type, name, object->Size());
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size) {
+ SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
+ address, static_cast<unsigned int>(size));
+ unsigned trace_node_id = 0;
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->profiler()->allocation_tracker()) {
+ trace_node_id =
+ allocation_tracker->address_to_trace()->GetTraceNodeId(address);
+ }
+ return snapshot_->AddEntry(type, name, object_id, size, trace_node_id);
}
+class SnapshotFiller {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ names_(snapshot->profiler()->names()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = allocator->AllocateEntry(ptr);
+ entries_->Pair(ptr, entry->index());
+ return entry;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ int index = entries_->Map(ptr);
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent,
+ int index,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int parent,
+ const char* reference_name,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetNamedReference(type, reference_name, child_entry);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetNamedReference(
+ type,
+ names_->GetName(index),
+ child_entry);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ StringsStorage* names_;
+ HeapEntriesMap* entries_;
+};
+
+
class GcSubrootsEnumerator : public ObjectVisitor {
public:
GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ SnapshotFiller* filler, V8HeapExplorer* explorer)
: filler_(filler),
explorer_(explorer),
previous_object_count_(0),
@@ -926,14 +1005,14 @@ class GcSubrootsEnumerator : public ObjectVisitor {
}
}
private:
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
V8HeapExplorer* explorer_;
intptr_t previous_object_count_;
intptr_t object_count_;
};
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
GcSubrootsEnumerator enumerator(filler, this);
@@ -1029,6 +1108,8 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSArrayBuffer()) {
+ ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -1147,13 +1228,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
JSArrayBufferView::kBufferOffset);
SetWeakReference(view, entry, "weak_next", view->weak_next(),
JSArrayBufferView::kWeakNextOffset);
- } else if (obj->IsJSArrayBuffer()) {
- JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
- SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
- JSArrayBuffer::kWeakNextOffset);
- SetWeakReference(buffer, entry,
- "weak_first_view", buffer->weak_first_view(),
- JSArrayBuffer::kWeakFirstViewOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1204,7 +1278,8 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- if (Context::index < Context::FIRST_WEAK_SLOT) { \
+ if (Context::index < Context::FIRST_WEAK_SLOT || \
+ Context::index == Context::MAP_CACHE_INDEX) { \
SetInternalReference(context, entry, #name, context->get(Context::index), \
FixedArray::OffsetOfElementAt(Context::index)); \
} else { \
@@ -1339,9 +1414,6 @@ void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
"name", script->name(),
Script::kNameOffset);
SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
"context_data", script->context_data(),
Script::kContextOffset);
TagObject(script->line_ends(), "(script line ends)");
@@ -1454,6 +1526,42 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
}
+class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
+ public:
+ JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
+ : size_(size)
+ , explorer_(explorer) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) {
+ return explorer_->AddEntry(
+ static_cast<Address>(ptr),
+ HeapEntry::kNative, "system / JSArrayBufferData", size_);
+ }
+ private:
+ size_t size_;
+ V8HeapExplorer* explorer_;
+};
+
+
+void V8HeapExplorer::ExtractJSArrayBufferReferences(
+ int entry, JSArrayBuffer* buffer) {
+ SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
+ JSArrayBuffer::kWeakNextOffset);
+ SetWeakReference(buffer, entry,
+ "weak_first_view", buffer->weak_first_view(),
+ JSArrayBuffer::kWeakFirstViewOffset);
+ // Setup a reference to a native memory backing_store object.
+ if (!buffer->backing_store())
+ return;
+ size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ JSArrayBufferDataEntryAllocator allocator(data_size, this);
+ HeapEntry* data_entry =
+ filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ entry, "backing_store", data_entry);
+}
+
+
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
if (!js_obj->IsJSFunction()) return;
@@ -1712,7 +1820,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
+ SnapshotFiller* filler) {
filler_ = filler;
// Make sure builtin code objects get their builtin tags
@@ -2104,7 +2212,8 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
entries_type_,
name,
heap_object_map_->GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
+ size != -1 ? static_cast<int>(size) : 0,
+ 0);
}
@@ -2222,7 +2331,7 @@ List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
+ SnapshotFiller* filler) {
filler_ = filler;
FillRetainedObjects();
FillImplicitReferences();
@@ -2349,64 +2458,6 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
}
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- names_(snapshot->profiler()->names()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- names_->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- StringsStorage* names_;
- HeapEntriesMap* entries_;
-};
-
-
HeapSnapshotGenerator::HeapSnapshotGenerator(
HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -2603,8 +2654,8 @@ class OutputStreamWriter {
// type, name|index, to_node.
const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, children_index.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
+// type, name, id, self_size, edge_count, trace_node_id.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 6;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
if (AllocationTracker* allocation_tracker =
@@ -2663,9 +2714,26 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+namespace {
+
+template<size_t size> struct ToUnsigned;
+
+template<> struct ToUnsigned<4> {
+ typedef uint32_t Type;
+};
+
+template<> struct ToUnsigned<8> {
+ typedef uint64_t Type;
+};
+
+} // namespace
+
+
+template<typename T>
+static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
+ STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned
int number_of_digits = 0;
- unsigned t = value;
+ T t = value;
do {
++number_of_digits;
} while (t /= 10);
@@ -2673,7 +2741,7 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
buffer_pos += number_of_digits;
int result = buffer_pos;
do {
- int last_digit = value % 10;
+ int last_digit = static_cast<int>(value % 10);
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
@@ -2681,6 +2749,14 @@ static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
}
+template<typename T>
+static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
+ typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
+ STATIC_CHECK(sizeof(value) == sizeof(unsigned_value));
+ return utoa_impl(unsigned_value, buffer, buffer_pos);
+}
+
+
void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
@@ -2717,10 +2793,11 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {
void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+ // The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0
static const int kBufferSize =
5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
+ + MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
if (entry_index(entry) != 0) {
@@ -2735,6 +2812,8 @@ void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->trace_node_id(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
@@ -2768,7 +2847,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("name") ","
JSON_S("id") ","
JSON_S("self_size") ","
- JSON_S("edge_count")) ","
+ JSON_S("edge_count") ","
+ JSON_S("trace_node_id")) ","
JSON_S("node_types") ":" JSON_A(
JSON_A(
JSON_S("hidden") ","
@@ -2813,7 +2893,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("column")) ","
JSON_S("trace_node_fields") ":" JSON_A(
JSON_S("id") ","
- JSON_S("function_id") ","
+ JSON_S("function_info_index") ","
JSON_S("count") ","
JSON_S("size") ","
JSON_S("children"))));
@@ -2828,7 +2908,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
uint32_t count = 0;
AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
if (tracker) {
- count = tracker->id_to_function_info()->occupancy();
+ count = tracker->function_info_list().length();
}
writer_->AddNumber(count);
}
@@ -2861,7 +2941,7 @@ void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
int buffer_pos = 0;
buffer_pos = utoa(node->id(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer_pos = utoa(node->function_info_index(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
@@ -2903,22 +2983,18 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
- HashMap* id_to_function_info = tracker->id_to_function_info();
+ const List<AllocationTracker::FunctionInfo*>& list =
+ tracker->function_info_list();
bool first_entry = true;
- for (HashMap::Entry* p = id_to_function_info->Start();
- p != NULL;
- p = id_to_function_info->Next(p)) {
- SnapshotObjectId id =
- static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
- AllocationTracker::FunctionInfo* info =
- reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ for (int i = 0; i < list.length(); i++) {
+ AllocationTracker::FunctionInfo* info = list[i];
int buffer_pos = 0;
if (first_entry) {
first_entry = false;
} else {
buffer[buffer_pos++] = ',';
}
- buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer_pos = utoa(info->function_id, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
buffer[buffer_pos++] = ',';
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index e209eeabb..634ede19a 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -37,6 +37,7 @@ class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
+class SnapshotFiller;
class HeapGraphEdge BASE_EMBEDDED {
public:
@@ -114,14 +115,16 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
- int self_size);
+ size_t self_size,
+ unsigned trace_node_id);
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
+ size_t self_size() { return self_size_; }
+ unsigned trace_node_id() const { return trace_node_id_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -146,10 +149,12 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
- int self_size_;
- SnapshotObjectId id_;
+ size_t self_size_;
HeapSnapshot* snapshot_;
const char* name_;
+ SnapshotObjectId id_;
+ // id of allocation stack trace top node
+ unsigned trace_node_id_;
};
@@ -186,7 +191,8 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
- int size);
+ size_t size,
+ unsigned trace_node_id);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -228,7 +234,7 @@ class HeapObjectsMap {
SnapshotObjectId FindOrAddEntry(Address addr,
unsigned int size,
bool accessed = true);
- void MoveObject(Address from, Address to, int size);
+ bool MoveObject(Address from, Address to, int size);
void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
@@ -338,32 +344,6 @@ class HeapObjectsSet {
};
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
class SnapshottingProgressReportingInterface {
public:
virtual ~SnapshottingProgressReportingInterface() { }
@@ -380,12 +360,16 @@ class V8HeapExplorer : public HeapEntriesAllocator {
v8::HeapProfiler::ObjectNameResolver* resolver);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
void TagGlobalObjects();
void TagCodeObject(Code* code);
void TagBuiltinCodeObject(Code* code, const char* name);
+ HeapEntry* AddEntry(Address address,
+ HeapEntry::Type type,
+ const char* name,
+ size_t size);
static String* GetConstructorName(JSObject* object);
@@ -396,6 +380,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);
+
const char* GetSystemEntryName(HeapObject* object);
void ExtractReferences(HeapObject* obj);
@@ -414,6 +399,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
+ void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
@@ -477,7 +463,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
StringsStorage* names_;
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
@@ -504,9 +490,9 @@ class NativeObjectsExplorer {
NativeObjectsExplorer(HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
+ void AddRootEntries(SnapshotFiller* filler);
int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ bool IterateAndExtractReferences(SnapshotFiller* filler);
private:
void FillRetainedObjects();
@@ -546,7 +532,7 @@ class NativeObjectsExplorer {
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
- SnapshotFillerInterface* filler_;
+ SnapshotFiller* filler_;
static HeapThing const kNativesRootObject;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 82cf45f74..6374433bb 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -105,7 +105,6 @@ Heap::Heap()
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_timeout_(0),
- disallow_allocation_failure_(false),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
@@ -155,7 +154,7 @@ Heap::Heap()
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL) {
+ gc_callbacks_depth_(0) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -545,7 +544,9 @@ void Heap::ProcessPretenuringFeedback() {
}
}
- if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
+ if (trigger_deoptimization) {
+ isolate_->stack_guard()->DeoptMarkedAllocationSites();
+ }
FlushAllocationSitesScratchpad();
@@ -567,6 +568,25 @@ void Heap::ProcessPretenuringFeedback() {
}
+void Heap::DeoptMarkedAllocationSites() {
+ // TODO(hpayer): If iterating over the allocation sites list becomes a
+ // performance issue, use a cache heap data structure instead (similar to the
+ // allocation sites scratchpad).
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ AllocationSite* site = AllocationSite::cast(list_element);
+ if (site->deopt_dependent_code()) {
+ site->dependent_code()->MarkCodeForDeoptimization(
+ isolate_,
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ site->set_deopt_dependent_code(false);
+ }
+ list_element = site->weak_next();
+ }
+ Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
@@ -575,6 +595,9 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}
+ // Process pretenuring feedback and update allocation sites.
+ ProcessPretenuringFeedback();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -752,6 +775,21 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
+void Heap::EnsureFillerObjectAtTop() {
+ // There may be an allocation memento behind every object in new space.
+ // If we evacuate a not full new space or if we are on the last page of
+ // the new space, then there may be uninitialized memory behind the top
+ // pointer of the new space page. We store a filler object there to
+ // identify the unused space.
+ Address from_top = new_space_.top();
+ Address from_limit = new_space_.limit();
+ if (from_top < from_limit) {
+ int remaining_in_page = static_cast<int>(from_limit - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
+}
+
+
bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
const char* collector_reason,
@@ -768,17 +806,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
- // There may be an allocation memento behind every object in new space.
- // If we evacuate a not full new space or if we are on the last page of
- // the new space, then there may be uninitialized memory behind the top
- // pointer of the new space page. We store a filler object there to
- // identify the unused space.
- Address from_top = new_space_.top();
- Address from_limit = new_space_.limit();
- if (from_top < from_limit) {
- int remaining_in_page = static_cast<int>(from_limit - from_top);
- CreateFillerObjectAt(from_top, remaining_in_page);
- }
+ EnsureFillerObjectAtTop();
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
@@ -852,16 +880,6 @@ int Heap::NotifyContextDisposed() {
}
-void Heap::PerformScavenge() {
- GCTracer tracer(this, NULL, NULL);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
-}
-
-
void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -1068,11 +1086,14 @@ bool Heap::PerformGarbageCollection(
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ }
}
EnsureFromSpaceIsCommitted();
@@ -1177,11 +1198,14 @@ bool Heap::PerformGarbageCollection(
amount_of_external_allocated_memory_;
}
- {
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ { GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
}
#ifdef VERIFY_HEAP
@@ -1621,8 +1645,6 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
- ProcessPretenuringFeedback();
-
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
@@ -1753,6 +1775,18 @@ static Object* VisitWeakList(Heap* heap,
}
+template <class T>
+static void ClearWeakList(Heap* heap,
+ Object* list) {
+ Object* undefined = heap->undefined_value();
+ while (list != undefined) {
+ T* candidate = reinterpret_cast<T*>(list);
+ list = WeakListVisitor<T>::WeakNext(candidate);
+ WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+ }
+}
+
+
template<>
struct WeakListVisitor<JSFunction> {
static void SetWeakNext(JSFunction* function, Object* next) {
@@ -1846,7 +1880,11 @@ struct WeakListVisitor<Context> {
}
}
- static void VisitPhantomObject(Heap*, Context*) {
+ static void VisitPhantomObject(Heap* heap, Context* context) {
+ ClearWeakList<JSFunction>(heap,
+ context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
+ ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
static int WeakNextOffset() {
@@ -2002,14 +2040,12 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
- bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
- isolate_,
- DependentCode::kAllocationSiteTenuringChangedGroup);
- if (got_marked) marked = true;
+ casted->set_deopt_dependent_code(true);
+ marked = true;
}
cur = casted->weak_next();
}
- if (marked) isolate_->stack_guard()->DeoptMarkedCode();
+ if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
}
@@ -2672,8 +2708,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->initialize_storage();
- info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
+ info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
return info;
}
@@ -2856,7 +2891,7 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
@@ -2915,6 +2950,16 @@ bool Heap::CreateInitialMaps() {
TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array)->To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
+
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
@@ -3055,6 +3100,17 @@ void Heap::CreateFixedStubs() {
// The eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
HandleScope scope(isolate());
+
+ // Create stubs that should be there, so we don't unexpectedly have to
+ // create them if we need them during the creation of another stub.
+ // Stub creation mixes raw pointers and handles in an unsafe manner so
+ // we cannot create stubs while we are creating stubs.
+ CodeStub::GenerateStubsAheadOfTime(isolate());
+
+ // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+ // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+ // is created.
+
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3065,12 +3121,6 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -3263,6 +3313,9 @@ bool Heap::CreateInitialObjects() {
}
set_undefined_cell(Cell::cast(obj));
+ // The symbol registry is initialized lazily.
+ set_symbol_registry(undefined_value());
+
// Allocate object to hold object observation state.
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3272,6 +3325,15 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
+ // Allocate object to hold object microtask state.
+ { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_microtask_state(JSObject::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3282,8 +3344,26 @@ bool Heap::CreateInitialObjects() {
if (!maybe_obj->ToObject(&obj)) return false;
}
Symbol::cast(obj)->set_is_private(true);
+ set_nonexistent_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
set_elements_transition_symbol(Symbol::cast(obj));
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
+ set_uninitialized_symbol(Symbol::cast(obj));
+
+ { MaybeObject* maybe_obj = AllocateSymbol();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ Symbol::cast(obj)->set_is_private(true);
+ set_megamorphic_symbol(Symbol::cast(obj));
+
{ MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3302,7 +3382,7 @@ bool Heap::CreateInitialObjects() {
set_materialized_objects(FixedArray::cast(obj));
// Handling of script id generation is in Factory::NewScript.
- set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
+ set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
{ MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -3623,10 +3703,25 @@ void Heap::InitializeAllocationSitesScratchpad() {
}
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode) {
if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+ // We cannot use the normal write-barrier because slots need to be
+ // recorded with non-incremental marking as well. We have to explicitly
+ // record the slot to take evacuation candidates into account.
allocation_sites_scratchpad()->set(
- allocation_sites_scratchpad_length_, site);
+ allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
+ Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+ allocation_sites_scratchpad_length_);
+
+ if (mode == RECORD_SCRATCHPAD_SLOT) {
+ // We need to allow slots buffer overflow here since the evacuation
+ // candidates are not part of the global list of old space pages and
+ // releasing an evacuation candidate due to a slots buffer overflow
+ // results in lost pages.
+ mark_compact_collector()->RecordSlot(
+ slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ }
allocation_sites_scratchpad_length_++;
}
}
@@ -3693,12 +3788,34 @@ Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
}
+Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
+ ElementsKind elementsKind) {
+ switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return kEmptyFixed##Type##ArrayRootIndex;
+
+ TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+ default:
+ UNREACHABLE();
+ return kUndefinedValueRootIndex;
+ }
+}
+
+
ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
return ExternalArray::cast(
roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
}
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+ return FixedTypedArrayBase::cast(
+ roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
+}
+
+
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
// done after conversion to int. Doing this by comparing bit
@@ -3773,7 +3890,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames) {
Object* result;
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
@@ -3788,7 +3904,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
message->set_start_position(start_position);
message->set_end_position(end_position);
message->set_script(script);
- message->set_stack_trace(stack_trace);
message->set_stack_frames(stack_frames);
return result;
}
@@ -3798,8 +3913,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
const ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x5);
+ return isolate()->ThrowInvalidStringLength();
}
Map* map = external_ascii_string_map();
@@ -3821,8 +3935,7 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
- isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x6);
+ return isolate()->ThrowInvalidStringLength();
}
// For small strings we check whether the resource contains only
@@ -3873,7 +3986,7 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x7);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = ByteArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
@@ -3903,6 +4016,33 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
}
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+ Address address = object->address();
+ bool is_in_old_pointer_space = InOldPointerSpace(address);
+ bool is_in_old_data_space = InOldDataSpace(address);
+
+ if (lo_space()->Contains(object)) return false;
+
+ // We cannot move the object start if the given old space page is
+ // concurrently swept.
+ return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+ Page::FromAddress(address)->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
+}
+
+
+void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+ if (incremental_marking()->IsMarking() &&
+ Marking::IsBlack(Marking::MarkBitFrom(address))) {
+ if (mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(address, by);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+ }
+ }
+}
+
+
MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
@@ -3971,6 +4111,7 @@ MaybeObject* Heap::AllocateFixedTypedArray(int length,
reinterpret_cast<FixedTypedArrayBase*>(object);
elements->set_map(MapForFixedTypedArray(array_type));
elements->set_length(length);
+ memset(elements->DataPtr(), 0, elements->DataSize());
return elements;
}
@@ -3981,12 +4122,20 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
bool immovable,
bool crankshafted,
int prologue_offset) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+ // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
+ ConstantPoolArray* constant_pool;
+ if (FLAG_enable_ool_constant_pool) {
+ MaybeObject* maybe_constant_pool = desc.origin->AllocateConstantPool(this);
+ if (!maybe_constant_pool->To(&constant_pool)) return maybe_constant_pool;
+ } else {
+ constant_pool = empty_constant_pool_array();
+ }
+
// Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
int obj_size = Code::SizeFor(body_size);
@@ -4026,6 +4175,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_raw_type_feedback_info(undefined_value());
+ code->set_next_code_link(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
@@ -4033,7 +4183,11 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
- code->set_constant_pool(empty_constant_pool_array());
+
+ if (FLAG_enable_ool_constant_pool) {
+ desc.origin->PopulateConstantPool(constant_pool);
+ }
+ code->set_constant_pool(constant_pool);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (code->kind() == Code::FUNCTION) {
@@ -4064,9 +4218,20 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
MaybeObject* Heap::CopyCode(Code* code) {
+ MaybeObject* maybe_result;
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ maybe_result = CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_result->ToObject(&new_constant_pool)) return maybe_result;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
+
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
@@ -4080,8 +4245,12 @@ MaybeObject* Heap::CopyCode(Code* code) {
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(new_addr, old_addr, obj_size);
- // Relocate the copy.
Code* new_code = Code::cast(result);
+
+ // Update the constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
+ // Relocate the copy.
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
@@ -4090,8 +4259,8 @@ MaybeObject* Heap::CopyCode(Code* code) {
MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+ // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
Object* reloc_info_array;
{ MaybeObject* maybe_reloc_info_array =
AllocateByteArray(reloc_info.length(), TENURED);
@@ -4099,6 +4268,18 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
return maybe_reloc_info_array;
}
}
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ MaybeObject* maybe_constant_pool =
+ CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_constant_pool->ToObject(&new_constant_pool))
+ return maybe_constant_pool;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
@@ -4128,6 +4309,9 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
+ // Update constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
// Copy patched rinfo.
CopyBytes(new_code->relocation_start(),
reloc_info.start(),
@@ -4158,28 +4342,8 @@ void Heap::InitializeAllocationMemento(AllocationMemento* memento,
}
-MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
- Handle<AllocationSite> allocation_site) {
- ASSERT(gc_state_ == NOT_IN_GC);
- ASSERT(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space =
- (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
- int size = map->instance_size() + AllocationMemento::kSize;
- Object* result;
- MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- // No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_no_write_barrier(map);
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- InitializeAllocationMemento(alloc_memento, *allocation_site);
- return result;
-}
-
-
-MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
+MaybeObject* Heap::Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
// If allocation failures are disallowed, we may allocate in a different
@@ -4187,11 +4351,19 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
int size = map->instance_size();
+ if (allocation_site != NULL) {
+ size += AllocationMemento::kSize;
+ }
Object* result;
MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
+ if (allocation_site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ InitializeAllocationMemento(alloc_memento, allocation_site);
+ }
return result;
}
@@ -4233,16 +4405,15 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject* boilerplate;
int arguments_object_size;
bool strict_mode_callee = callee->IsJSFunction() &&
- !JSFunction::cast(callee)->shared()->is_classic_mode();
+ JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
if (strict_mode_callee) {
boilerplate =
- isolate()->context()->native_context()->
- strict_mode_arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSizeStrict;
+ isolate()->context()->native_context()->strict_arguments_boilerplate();
+ arguments_object_size = kStrictArgumentsObjectSize;
} else {
boilerplate =
- isolate()->context()->native_context()->arguments_boilerplate();
- arguments_object_size = kArgumentsObjectSize;
+ isolate()->context()->native_context()->sloppy_arguments_boilerplate();
+ arguments_object_size = kSloppyArgumentsObjectSize;
}
// Check that the size of the boilerplate matches our
@@ -4268,7 +4439,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Smi::FromInt(length),
SKIP_WRITE_BARRIER);
- // Set the callee property for non-strict mode arguments object only.
+ // Set the callee property for sloppy mode arguments object only.
if (!strict_mode_callee) {
JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
callee);
@@ -4315,7 +4486,10 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
MaybeObject* Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, bool allocate_properties) {
+ Map* map,
+ PretenureFlag pretenure,
+ bool allocate_properties,
+ AllocationSite* allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4341,90 +4515,28 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
int size = map->instance_size();
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
- MaybeObject* maybe_obj = Allocate(map, space);
+ MaybeObject* maybe_obj = Allocate(map, space, allocation_site);
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements() ||
- JSObject::cast(obj)->HasExternalArrayElements());
- return obj;
-}
-
-
-MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- ASSERT(prop_size >= 0);
- FixedArray* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
- if (!maybe_properties->To(&properties)) return maybe_properties;
- }
-
- // Allocate the JSObject.
- int size = map->instance_size();
- AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
- Object* obj;
- MaybeObject* maybe_obj =
- AllocateWithAllocationSite(map, space, allocation_site);
- if (!maybe_obj->To(&obj)) return maybe_obj;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
+ JSObject::cast(obj)->HasExternalArrayElements() ||
+ JSObject::cast(obj)->HasFixedTypedArrayElements());
return obj;
}
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure,
+ AllocationSite* allocation_site) {
ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map.
- MaybeObject* result = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- Object* non_failure;
- ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
-#endif
- return result;
-}
-
-MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
- Handle<AllocationSite> allocation_site) {
- ASSERT(constructor->has_initial_map());
- // Allocate the object based on the constructors initial map, or the payload
- // advice
- Map* initial_map = constructor->initial_map();
-
- ElementsKind to_kind = allocation_site->GetElementsKind();
- AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
- if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
- if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
- // Possibly alter the mode, since we found an updated elements kind
- // in the type info cell.
- mode = AllocationSite::GetMode(to_kind);
- }
-
- MaybeObject* result;
- if (mode == TRACK_ALLOCATION_SITE) {
- result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
- allocation_site);
- } else {
- result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
- }
+ // Allocate the object based on the constructors initial map.
+ MaybeObject* result = AllocateJSObjectFromMap(constructor->initial_map(),
+ pretenure,
+ true,
+ allocation_site);
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
Object* non_failure;
@@ -4926,16 +5038,13 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
int size;
Map* map;
+ if (chars < 0 || chars > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
+ }
if (is_one_byte) {
- if (chars > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0x9);
- }
map = ascii_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
} else {
- if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xa);
- }
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
@@ -4977,8 +5086,8 @@ MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
MaybeObject* Heap::AllocateRawOneByteString(int length,
PretenureFlag pretenure) {
- if (length < 0 || length > SeqOneByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xb);
+ if (length < 0 || length > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
@@ -5001,8 +5110,8 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
MaybeObject* Heap::AllocateRawTwoByteString(int length,
PretenureFlag pretenure) {
- if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException(0xc);
+ if (length < 0 || length > String::kMaxLength) {
+ return isolate()->ThrowInvalidStringLength();
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
@@ -5054,6 +5163,38 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
}
+MaybeObject* Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
+ if (!InNewSpace(src)) {
+ return src;
+ }
+
+ int len = src->length();
+ Object* obj;
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_array_map());
+ FixedArray* result = FixedArray::cast(obj);
+ result->set_length(len);
+
+ // Copy the content
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+
+ // TODO(mvstanton): The map is set twice because of protection against calling
+ // set() on a COW FixedArray. Issue v8:3221 created to track this, and
+ // we might then be able to remove this whole method.
+ HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateEmptyFixedTypedArray(ExternalArrayType array_type) {
+ return AllocateFixedTypedArray(0, array_type, TENURED);
+}
+
+
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
@@ -5100,27 +5241,30 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
Map* map) {
int int64_entries = src->count_of_int64_entries();
- int ptr_entries = src->count_of_ptr_entries();
+ int code_ptr_entries = src->count_of_code_ptr_entries();
+ int heap_ptr_entries = src->count_of_heap_ptr_entries();
int int32_entries = src->count_of_int32_entries();
Object* obj;
{ MaybeObject* maybe_obj =
- AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ AllocateConstantPoolArray(int64_entries, code_ptr_entries,
+ heap_ptr_entries, int32_entries);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
dst->set_map_no_write_barrier(map);
+ int size = ConstantPoolArray::SizeFor(
+ int64_entries, code_ptr_entries, heap_ptr_entries, int32_entries);
CopyBlock(
dst->address() + ConstantPoolArray::kLengthOffset,
src->address() + ConstantPoolArray::kLengthOffset,
- ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
- - ConstantPoolArray::kLengthOffset);
+ size - ConstantPoolArray::kLengthOffset);
return obj;
}
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xe);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
@@ -5232,7 +5376,7 @@ MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xf);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedDoubleArray::SizeFor(length);
#ifndef V8_HOST_ARCH_64_BIT
@@ -5250,12 +5394,14 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
- number_of_int32_entries > 0);
+ ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+ number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries);
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
@@ -5272,29 +5418,38 @@ MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
ConstantPoolArray* constant_pool =
reinterpret_cast<ConstantPoolArray*>(object);
constant_pool->SetEntryCounts(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries);
- if (number_of_ptr_entries > 0) {
+ if (number_of_code_ptr_entries > 0) {
+ int offset =
+ constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index());
+ MemsetPointer(
+ reinterpret_cast<Address*>(HeapObject::RawField(constant_pool, offset)),
+ isolate()->builtins()->builtin(Builtins::kIllegal)->entry(),
+ number_of_code_ptr_entries);
+ }
+ if (number_of_heap_ptr_entries > 0) {
+ int offset =
+ constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index());
MemsetPointer(
- HeapObject::RawField(
- constant_pool,
- constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ HeapObject::RawField(constant_pool, offset),
undefined_value(),
- number_of_ptr_entries);
+ number_of_heap_ptr_entries);
}
return constant_pool;
}
MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
- int size = ConstantPoolArray::SizeFor(0, 0, 0);
+ int size = ConstantPoolArray::SizeFor(0, 0, 0, 0);
Object* result;
{ MaybeObject* maybe_result =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
- ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
+ ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0, 0);
return result;
}
@@ -5826,6 +5981,9 @@ void Heap::Verify() {
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ VerifySmisVisitor smis_visitor;
+ IterateSmiRoots(&smis_visitor);
+
new_space_.Verify();
old_pointer_space_->Verify(&visitor);
@@ -6123,6 +6281,14 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+ // Acquire execution access since we are going to read stack limit values.
+ ExecutionAccess access(isolate());
+ v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+ v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6345,7 +6511,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
bool Heap::AdvanceSweepers(int step_size) {
- ASSERT(isolate()->num_sweeper_threads() == 0);
+ ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
@@ -6499,8 +6665,6 @@ bool Heap::SetUp() {
mark_compact_collector()->SetUp();
- if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-
return true;
}
@@ -6642,9 +6806,6 @@ void Heap::TearDown() {
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
-
- delete relocation_mutex_;
- relocation_mutex_ = NULL;
}
@@ -7386,8 +7547,9 @@ GCTracer::~GCTracer() {
PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
- PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
- PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
@@ -7518,7 +7680,7 @@ void DescriptorLookupCache::Clear() {
void Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy);
if (isolate_->bootstrapper()->IsActive()) return;
- if (disallow_allocation_failure()) return;
+ if (!AllowAllocationFailure::IsAllowed(isolate_)) return;
CollectGarbage(NEW_SPACE);
}
#endif
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 266cdb968..0f586e928 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -78,7 +78,6 @@ namespace internal {
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
- V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -165,7 +164,17 @@ namespace internal {
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
- V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
+ V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
+ V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
+ V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
+ EmptyFixedUint8ClampedArray) \
+ V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
@@ -186,27 +195,37 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
- V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Cell, undefined_cell, UndefineCell) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
+ V(Object, symbol_registry, SymbolRegistry) \
V(Symbol, frozen_symbol, FrozenSymbol) \
+ V(Symbol, nonexistent_symbol, NonExistentSymbol) \
V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(Symbol, observed_symbol, ObservedSymbol) \
+ V(Symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)
+ V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
+ V(JSObject, microtask_state, MicrotaskState)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
+ SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
// Heap roots that are known to be immortal immovable, for which we can safely
@@ -242,7 +261,7 @@ namespace internal {
V(empty_constant_pool_array) \
V(arguments_marker) \
V(symbol_map) \
- V(non_strict_arguments_elements_map) \
+ V(sloppy_arguments_elements_map) \
V(function_context_map) \
V(catch_context_map) \
V(with_context_map) \
@@ -297,6 +316,11 @@ namespace internal {
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(for_string, "for") \
+ V(for_api_string, "for_api") \
+ V(for_intern_string, "for_intern") \
+ V(private_api_string, "private_api") \
+ V(private_intern_string, "private_intern") \
V(Date_string, "Date") \
V(this_string, "this") \
V(to_string_string, "toString") \
@@ -325,10 +349,6 @@ namespace internal {
V(MakeReferenceError_string, "MakeReferenceError") \
V(MakeSyntaxError_string, "MakeSyntaxError") \
V(MakeTypeError_string, "MakeTypeError") \
- V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
V(illegal_return_string, "illegal_return") \
V(illegal_break_string, "illegal_break") \
V(illegal_continue_string, "illegal_continue") \
@@ -678,14 +698,13 @@ class Heap {
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
+ // If allocation_site is non-null, then a memento is emitted after the object
+ // that points to the site.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
- JSFunction* constructor,
- Handle<AllocationSite> allocation_site);
+ PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -765,21 +784,21 @@ class Heap {
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
-
- MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
- Map* map, Handle<AllocationSite> allocation_site);
+ Map* map,
+ PretenureFlag pretenure = NOT_TENURED,
+ bool alloc_props = true,
+ AllocationSite* allocation_site = NULL);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
-
- MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
- AllocationSpace space, Handle<AllocationSite> allocation_site);
+ MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space,
+ AllocationSite* allocation_site = NULL);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -972,6 +991,10 @@ class Heap {
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyAndTenureFixedCOWArray(FixedArray* src);
+
// Make a copy of src, set the map, and return the copy. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);
@@ -1005,9 +1028,10 @@ class Heap {
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
- int first_int64_index,
- int first_ptr_index,
- int first_int32_index);
+ int number_of_int64_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
+ int number_of_int32_entries);
// Allocates a fixed double array with uninitialized values. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -1070,15 +1094,15 @@ class Heap {
Object* prototype,
PretenureFlag pretenure = TENURED);
- // Arguments object size.
- static const int kArgumentsObjectSize =
+ // Sloppy mode arguments object size.
+ static const int kSloppyArgumentsObjectSize =
JSObject::kHeaderSize + 2 * kPointerSize;
// Strict mode arguments has no callee so it is smaller.
- static const int kArgumentsObjectSizeStrict =
+ static const int kStrictArgumentsObjectSize =
JSObject::kHeaderSize + 1 * kPointerSize;
// Indicies for direct access into argument objects.
static const int kArgumentsLengthIndex = 0;
- // callee is only valid in non-strict mode.
+ // callee is only valid in sloppy mode.
static const int kArgumentsCalleeIndex = 1;
// Allocates an arguments object - optionally with an elements array.
@@ -1134,7 +1158,6 @@ class Heap {
int start_position,
int end_position,
Object* script,
- Object* stack_trace,
Object* stack_frames);
// Allocate a new external string object, which is backed by a string
@@ -1164,6 +1187,13 @@ class Heap {
// when shortening objects.
void CreateFillerObjectAt(Address addr, int size);
+ bool CanMoveObjectStart(HeapObject* object);
+
+ enum InvocationMode { FROM_GC, FROM_MUTATOR };
+
+ // Maintain marking consistency for IncrementalMarking.
+ void AdjustLiveBytes(Address address, int by, InvocationMode mode);
+
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. On success, the pointer to the Code object is stored in the
@@ -1255,10 +1285,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();
- // Utility to invoke the scavenger. This is needed in test code to
- // ensure correct callback for weak global handles.
- void PerformScavenge();
-
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1347,6 +1373,9 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
@@ -1485,10 +1514,6 @@ class Heap {
allocation_timeout_ = timeout;
}
- bool disallow_allocation_failure() {
- return disallow_allocation_failure_;
- }
-
void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
@@ -1501,10 +1526,16 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ enum ScratchpadSlotMode {
+ IGNORE_SCRATCHPAD_SLOT,
+ RECORD_SCRATCHPAD_SLOT
+ };
+
// An object may have an AllocationSite associated with it through a trailing
// AllocationMemento. Its feedback should be updated when objects are found
// in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+ static inline void UpdateAllocationSiteFeedback(
+ HeapObject* object, ScratchpadSlotMode mode);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -1582,7 +1613,7 @@ class Heap {
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
- // Declare all the root indices.
+ // Declare all the root indices. This defines the root list order.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1598,8 +1629,14 @@ class Heap {
#undef DECLARE_STRUCT_MAP
kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+ kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
- kRootListLength
+ kSmiRootsStart = kStringTableRootIndex + 1
};
STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
@@ -1628,7 +1665,9 @@ class Heap {
ExternalArrayType array_type);
RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+ RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
ExternalArray* EmptyExternalArrayForMap(Map* map);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -1834,6 +1873,8 @@ class Heap {
return amount_of_external_allocated_memory_;
}
+ void DeoptMarkedAllocationSites();
+
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -1879,16 +1920,12 @@ class Heap {
class RelocationLock {
public:
explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
- }
+ heap_->relocation_mutex_.Lock();
}
~RelocationLock() {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Unlock();
- }
+ heap_->relocation_mutex_.Unlock();
}
private:
@@ -1984,10 +2021,6 @@ class Heap {
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
int allocation_timeout_;
-
- // Do we expect to be able to handle allocation failure at this
- // time?
- bool disallow_allocation_failure_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
@@ -2120,6 +2153,11 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
+ // Make sure there is a filler value behind the top of the new space
+  // so that the GC does not confuse some uninitialized/stale memory
+ // with the allocation memento of the object at the top
+ void EnsureFillerObjectAtTop();
+
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
@@ -2195,6 +2233,10 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
ExternalArrayType array_type);
+ // Allocate empty fixed typed array of given type.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedTypedArray(
+ ExternalArrayType array_type);
+
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
@@ -2296,7 +2338,8 @@ class Heap {
void InitializeAllocationSitesScratchpad();
// Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site);
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
void UpdateSurvivalRateTrend(int start_new_space_size);
@@ -2489,14 +2532,12 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
- Mutex* relocation_mutex_;
-#ifdef DEBUG
- bool relocation_mutex_locked_by_optimizer_thread_;
-#endif // DEBUG;
+ Mutex relocation_mutex_;
+
+ int gc_callbacks_depth_;
friend class Factory;
friend class GCTracer;
- friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class Page;
friend class Isolate;
@@ -2506,6 +2547,7 @@ class Heap {
#ifdef VERIFY_HEAP
friend class NoWeakObjectVerificationScope;
#endif
+ friend class GCCallbacksScope;
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2546,26 +2588,15 @@ class HeapStats {
};
-class DisallowAllocationFailure {
- public:
- inline DisallowAllocationFailure();
- inline ~DisallowAllocationFailure();
-
-#ifdef DEBUG
- private:
- bool old_state_;
-#endif
-};
-
-
class AlwaysAllocateScope {
public:
- inline AlwaysAllocateScope();
+ explicit inline AlwaysAllocateScope(Isolate* isolate);
inline ~AlwaysAllocateScope();
private:
// Implicitly disable artificial allocation failures.
- DisallowAllocationFailure disallow_allocation_failure_;
+ Heap* heap_;
+ DisallowAllocationFailure daf_;
};
@@ -2578,6 +2609,18 @@ class NoWeakObjectVerificationScope {
#endif
+class GCCallbacksScope {
+ public:
+ explicit inline GCCallbacksScope(Heap* heap);
+ inline ~GCCallbacksScope();
+
+ inline bool CheckReenter();
+
+ private:
+ Heap* heap_;
+};
+
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2589,6 +2632,13 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
+// Verify that all objects are Smis.
+class VerifySmisVisitor: public ObjectVisitor {
+ public:
+ inline void VisitPointers(Object** start, Object** end);
+};
+
+
// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
@@ -2829,6 +2879,7 @@ class GCTracer BASE_EMBEDDED {
MC_MARK,
MC_SWEEP,
MC_SWEEP_NEWSPACE,
+ MC_SWEEP_OLDSPACE,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
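
The RelocationLock change above swaps a conditionally locked, heap-allocated Mutex* for an embedded Mutex that is always held for the lifetime of the scope object. A minimal standalone sketch of that RAII pattern, using std::mutex instead of V8's Mutex class and purely illustrative names:

#include <mutex>

class Heap {
 public:
  // Scope object that holds the relocation mutex for as long as it is alive,
  // in the spirit of Heap::RelocationLock after the change above.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.lock();   // taken unconditionally now
    }
    ~RelocationLock() { heap_->relocation_mutex_.unlock(); }

   private:
    Heap* heap_;
  };

 private:
  std::mutex relocation_mutex_;  // embedded by value, no separate allocation
};

void Relocate(Heap* heap) {
  Heap::RelocationLock lock(heap);  // released automatically on scope exit
  // ... move objects while the lock is held ...
}
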
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index c98a03cb5..dd71078a2 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -30,6 +30,7 @@
namespace v8 {
namespace internal {
+
// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
@@ -135,7 +136,7 @@ class BoundsCheckBbData: public ZoneObject {
void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
BoundsCheckBbData* data = FatherInDominatorTree();
while (data != NULL && data->UpperCheck() == check) {
- ASSERT(data->upper_offset_ <= offset);
+ ASSERT(data->upper_offset_ < offset);
data->upper_offset_ = offset;
data = data->FatherInDominatorTree();
}
@@ -173,7 +174,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- TightenCheck(upper_check_, new_check);
+ TightenCheck(upper_check_, new_check, new_offset);
UpdateUpperOffsets(upper_check_, upper_offset_);
}
} else if (new_offset < lower_offset_) {
@@ -182,7 +183,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- TightenCheck(lower_check_, new_check);
+ TightenCheck(lower_check_, new_check, new_offset);
UpdateLowerOffsets(lower_check_, lower_offset_);
}
} else {
@@ -191,12 +192,20 @@ class BoundsCheckBbData: public ZoneObject {
}
if (!keep_new_check) {
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating check #%d after tightening\n",
+ new_check->id());
+ }
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
} else {
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
+ if (FLAG_trace_bce) {
+ OS::Print("Moving second check #%d after first check #%d\n",
+ new_check->id(), first_check->id());
+ }
// The length is guaranteed to be live at first_check.
ASSERT(new_check->length() == first_check->length());
HInstruction* old_position = new_check->next();
@@ -275,11 +284,16 @@ class BoundsCheckBbData: public ZoneObject {
}
void TightenCheck(HBoundsCheck* original_check,
- HBoundsCheck* tighter_check) {
+ HBoundsCheck* tighter_check,
+ int32_t new_offset) {
ASSERT(original_check->length() == tighter_check->length());
MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
+ if (FLAG_trace_bce) {
+ OS::Print("Tightened check #%d with offset %d from #%d\n",
+ original_check->id(), new_offset, tighter_check->id());
+ }
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -389,11 +403,32 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb_data_list,
NULL);
*data_p = bb_data_list;
+ if (FLAG_trace_bce) {
+ OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+ bb->block_id(), offset);
+ }
} else if (data->OffsetIsCovered(offset)) {
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
+ if (FLAG_trace_bce) {
+ OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+ check->id(), offset);
+ }
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() == bb) {
+ // TODO(jkummerow): I think the following logic would be preferable:
+ // if (data->Basicblock() == bb ||
+ // graph()->use_optimistic_licm() ||
+ // bb->IsLoopSuccessorDominator()) {
+ // data->CoverCheck(check, offset)
+ // } else {
+ // /* add pristine BCBbData like in (data == NULL) case above */
+ // }
+ // Even better would be: distinguish between read-only dominator-imposed
+ // knowledge and modifiable upper/lower checks.
+ // What happens currently is that the first bounds check in a dominated
+ // block will stay around while any further checks are hoisted out,
+ // which doesn't make sense. Investigate/fix this in a future CL.
data->CoverCheck(check, offset);
} else if (graph()->use_optimistic_licm() ||
bb->IsLoopSuccessorDominator()) {
@@ -411,6 +446,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
data->UpperCheck(),
bb_data_list,
data);
+ if (FLAG_trace_bce) {
+ OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+ bb->block_id(), new_lower_offset, new_upper_offset);
+ }
table_.Insert(key, bb_data_list, zone());
}
}
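
The bounds-check elimination changes above extend CoverCheck()/TightenCheck() so that, per index/length pair, the pass remembers the widest offset interval already proven on the current dominator path and deletes any later check whose offset falls inside it. A toy sketch of that covering test, independent of the Hydrogen classes (all names hypothetical):

#include <cstdint>
#include <iostream>

// Interval of offsets already proven in bounds for a given (index, length).
struct CheckedRange {
  int32_t lower;  // smallest offset checked so far
  int32_t upper;  // largest offset checked so far

  // A new check "index + offset is in bounds" is redundant if the offset
  // already lies inside the proven interval.
  bool Covers(int32_t offset) const {
    return lower <= offset && offset <= upper;
  }

  // Otherwise the interval is widened, mirroring CoverCheck()/TightenCheck().
  void Extend(int32_t offset) {
    if (offset < lower) lower = offset;
    if (offset > upper) upper = offset;
  }
};

int main() {
  CheckedRange range{0, 0};   // a check for "index + 0" has been emitted
  range.Extend(3);            // a check for "index + 3" tightens the upper bound
  std::cout << range.Covers(2) << "\n";  // 1: "index + 2" needs no new check
  std::cout << range.Covers(5) << "\n";  // 0: would still need a check
}
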
diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc
index e12f14a13..52a549299 100644
--- a/deps/v8/src/hydrogen-check-elimination.cc
+++ b/deps/v8/src/hydrogen-check-elimination.cc
@@ -48,12 +48,12 @@ typedef UniqueSet<Map>* MapSet;
struct HCheckTableEntry {
HValue* object_; // The object being approximated. NULL => invalid entry.
- HValue* check_; // The last check instruction.
- MapSet maps_; // The set of known maps for the object.
+ HInstruction* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
};
-// The main datastructure used during check elimination, which stores a
+// The main data structure used during check elimination, which stores a
// set of known maps for each object.
class HCheckTable : public ZoneObject {
public:
@@ -88,6 +88,10 @@ class HCheckTable : public ZoneObject {
ReduceCompareMap(HCompareMap::cast(instr));
break;
}
+ case HValue::kCompareObjectEqAndBranch: {
+ ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
+ break;
+ }
case HValue::kTransitionElementsKind: {
ReduceTransitionElementsKind(
HTransitionElementsKind::cast(instr));
@@ -103,8 +107,8 @@ class HCheckTable : public ZoneObject {
}
default: {
// If the instruction changes maps uncontrollably, drop everything.
- if (instr->CheckGVNFlag(kChangesMaps) ||
- instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kMaps) ||
+ instr->CheckChangesFlag(kOsrEntries)) {
Kill();
}
}
@@ -116,39 +120,105 @@ class HCheckTable : public ZoneObject {
return this;
}
- // Global analysis: Copy state to successor block.
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
+ HCheckTable* pred_state, HBasicBlock* pred_block,
+ Zone* zone) {
+ if (pred_state == NULL || pred_block->IsUnreachable()) {
+ return succ_state;
+ }
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
+ Zone* zone) {
+ if (state == NULL) {
+ block->MarkUnreachable();
+ } else if (block->IsUnreachable()) {
+ state = NULL;
+ }
+ if (FLAG_trace_check_elimination) {
+ PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
+ Print(state);
+ }
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
for (int i = 0; i < size_; i++) {
HCheckTableEntry* old_entry = &entries_[i];
+ ASSERT(old_entry->maps_->size() > 0);
HCheckTableEntry* new_entry = &copy->entries_[i];
- // TODO(titzer): keep the check if this block dominates the successor?
new_entry->object_ = old_entry->object_;
- new_entry->check_ = NULL;
new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
+ // Keep the check if the existing check's block dominates the successor.
+ if (old_entry->check_ != NULL &&
+ old_entry->check_->block()->Dominates(succ)) {
+ new_entry->check_ = old_entry->check_;
+ } else {
+ // Leave it NULL till we meet a new check instruction for this object
+ // in the control flow.
+ new_entry->check_ = NULL;
+ }
}
copy->cursor_ = cursor_;
copy->size_ = size_;
+ // Create entries for succ block's phis.
+ if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
+ int pred_index = succ->PredecessorIndexOf(from_block);
+ for (int phi_index = 0;
+ phi_index < succ->phis()->length();
+ ++phi_index) {
+ HPhi* phi = succ->phis()->at(phi_index);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+
+ HCheckTableEntry* pred_entry = copy->Find(phi_operand);
+ if (pred_entry != NULL) {
+ // Create an entry for a phi in the table.
+ copy->Insert(phi, NULL, pred_entry->maps_->Copy(phase_->zone()));
+ }
+ }
+ }
+
// Branch-sensitive analysis for certain comparisons may add more facts
// to the state for the successor on the true branch.
bool learned = false;
- HControlInstruction* end = succ->predecessors()->at(0)->end();
- if (succ->predecessors()->length() == 1 && end->SuccessorAt(0) == succ) {
+ if (succ->predecessors()->length() == 1) {
+ HControlInstruction* end = succ->predecessors()->at(0)->end();
+ bool is_true_branch = end->SuccessorAt(0) == succ;
if (end->IsCompareMap()) {
- // Learn on the true branch of if(CompareMap(x)).
HCompareMap* cmp = HCompareMap::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
- if (entry == NULL) {
- copy->Insert(object, cmp->map());
+ if (is_true_branch) {
+ // Learn on the true branch of if(CompareMap(x)).
+ if (entry == NULL) {
+ copy->Insert(object, cmp, cmp->map());
+ } else {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(cmp->map(), phase_->zone());
+ entry->maps_ = list;
+ entry->check_ = cmp;
+ }
} else {
- MapSet list = new(phase_->zone()) UniqueSet<Map>();
- list->Add(cmp->map(), phase_->zone());
- entry->maps_ = list;
+ // Learn on the false branch of if(CompareMap(x)).
+ if (entry != NULL) {
+ entry->maps_->Remove(cmp->map());
+ }
}
learned = true;
- } else if (end->IsCompareObjectEqAndBranch()) {
+ } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
// Learn on the true branch of if(CmpObjectEq(x, y)).
HCompareObjectEqAndBranch* cmp =
HCompareObjectEqAndBranch::cast(end);
@@ -177,44 +247,54 @@ class HCheckTable : public ZoneObject {
succ->block_id(),
learned ? "learned" : "copied",
from_block->block_id());
- copy->Print();
+ Print(copy);
}
return copy;
}
- // Global analysis: Merge this state with the other incoming state.
+ // Merge this state with the other incoming state.
HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
- HBasicBlock* that_block, Zone* zone) {
- if (that_block->IsReachable()) {
- if (that->size_ == 0) {
- // If the other state is empty, simply reset.
- size_ = 0;
- cursor_ = 0;
- } else {
- bool compact = false;
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* this_entry = &entries_[i];
- HCheckTableEntry* that_entry = that->Find(this_entry->object_);
- if (that_entry == NULL) {
- this_entry->object_ = NULL;
- compact = true;
- } else {
- this_entry->maps_ =
- this_entry->maps_->Union(that_entry->maps_, phase_->zone());
- if (this_entry->check_ != that_entry->check_) {
- this_entry->check_ = NULL;
- }
- ASSERT(this_entry->maps_->size() > 0);
+ HBasicBlock* pred_block, Zone* zone) {
+ if (that->size_ == 0) {
+ // If the other state is empty, simply reset.
+ size_ = 0;
+ cursor_ = 0;
+ } else {
+ int pred_index = succ->PredecessorIndexOf(pred_block);
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry;
+ if (this_entry->object_->IsPhi() &&
+ this_entry->object_->block() == succ) {
+ HPhi* phi = HPhi::cast(this_entry->object_);
+ HValue* phi_operand = phi->OperandAt(pred_index);
+ that_entry = that->Find(phi_operand);
+
+ } else {
+ that_entry = that->Find(this_entry->object_);
+ }
+
+ if (that_entry == NULL) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ =
+ this_entry->maps_->Union(that_entry->maps_, phase_->zone());
+ if (this_entry->check_ != that_entry->check_) {
+ this_entry->check_ = NULL;
}
+ ASSERT(this_entry->maps_->size() > 0);
}
- if (compact) Compact();
}
+ if (compact) Compact();
}
+
if (FLAG_trace_check_elimination) {
PrintF("B%d checkmaps-table merged with B%d table:\n",
- succ->block_id(), that_block->block_id());
- Print();
+ succ->block_id(), pred_block->block_id());
+ Print(this);
}
return this;
}
@@ -244,14 +324,43 @@ class HCheckTable : public ZoneObject {
}
return;
}
- i = i->Intersect(a, phase_->zone());
- if (i->size() == 0) {
+ MapSet intersection = i->Intersect(a, phase_->zone());
+ if (intersection->size() == 0) {
// Intersection is empty; probably megamorphic, which is likely to
// deopt anyway, so just leave things as they are.
INC_STAT(empty_);
} else {
- // TODO(titzer): replace the first check with a more strict check
- INC_STAT(narrowed_);
+ // Update set of maps in the entry.
+ entry->maps_ = intersection;
+ if (intersection->size() != i->size()) {
+ // Narrow set of maps in the second check maps instruction.
+ HGraph* graph = instr->block()->graph();
+ if (entry->check_ != NULL &&
+ entry->check_->block() == instr->block() &&
+ entry->check_->IsCheckMaps()) {
+ // There is a check in the same block so replace it with a more
+ // strict check and eliminate the second check entirely.
+ HCheckMaps* check = HCheckMaps::cast(entry->check_);
+ TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
+ check->block()->block_id()));
+ // Update map set and ensure that the check is alive.
+ check->set_map_set(intersection, graph->zone());
+ check->ClearFlag(HValue::kIsDead);
+ TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+ instr->id(), instr->block()->block_id(), entry->check_->id()));
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
+ instr->block()->block_id()));
+ instr->set_map_set(intersection, graph->zone());
+ entry->check_ = instr;
+ }
+
+ if (FLAG_trace_check_elimination) {
+ Print(this);
+ }
+ INC_STAT(narrowed_);
+ }
}
} else {
// No entry; insert a new one.
@@ -292,22 +401,32 @@ class HCheckTable : public ZoneObject {
HValue* object = instr->value()->ActualValue();
// Match a HCheckMapValue(object, HConstant(map))
Unique<Map> map = MapConstant(instr->map());
- MapSet maps = FindMaps(object);
- if (maps != NULL) {
+
+ HCheckTableEntry* entry = Find(object);
+ if (entry != NULL) {
+ MapSet maps = entry->maps_;
if (maps->Contains(map)) {
if (maps->size() == 1) {
// Object is known to have exactly this map.
- instr->DeleteAndReplaceWith(NULL);
+ if (entry->check_ != NULL) {
+ instr->DeleteAndReplaceWith(entry->check_);
+ } else {
+ // Mark check as dead but leave it in the graph as a checkpoint for
+ // subsequent checks.
+ instr->SetFlag(HValue::kIsDead);
+ entry->check_ = instr;
+ }
INC_STAT(removed_);
} else {
// Only one map survives the check.
maps->Clear();
maps->Add(map, phase_->zone());
+ entry->check_ = instr;
}
}
} else {
// No prior information.
- Insert(object, map);
+ Insert(object, instr, map);
}
}
@@ -324,34 +443,61 @@ class HCheckTable : public ZoneObject {
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
- Insert(object, MapConstant(instr->transition()));
+ Insert(object, NULL, MapConstant(instr->transition()));
} else if (IsMapAccess(instr->access())) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
- Insert(object, MapConstant(instr->value()));
+ Insert(object, NULL, MapConstant(instr->value()));
} else {
// If the instruction changes maps, it should be handled above.
- CHECK(!instr->CheckGVNFlag(kChangesMaps));
+ CHECK(!instr->CheckChangesFlag(kMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
MapSet maps = FindMaps(instr->value()->ActualValue());
if (maps == NULL) return;
+
+ int succ;
if (maps->Contains(instr->map())) {
- if (maps->size() == 1) {
- TRACE(("Marking redundant CompareMap #%d at B%d as true\n",
- instr->id(), instr->block()->block_id()));
- instr->set_known_successor_index(0);
- INC_STAT(compares_true_);
+ if (maps->size() != 1) {
+ TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
+ "ambiguous set of maps\n", instr->id(), instr->value()->id(),
+ instr->block()->block_id()));
+ return;
}
+ succ = 0;
+ INC_STAT(compares_true_);
} else {
- TRACE(("Marking redundant CompareMap #%d at B%d as false\n",
- instr->id(), instr->block()->block_id()));
- instr->set_known_successor_index(1);
+ succ = 1;
INC_STAT(compares_false_);
}
+
+ TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
+ instr->id(), instr->value()->id(), instr->block()->block_id(),
+ succ == 0 ? "true" : "false"));
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+ }
+
+ void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
+ MapSet maps_left = FindMaps(instr->left()->ActualValue());
+ if (maps_left == NULL) return;
+ MapSet maps_right = FindMaps(instr->right()->ActualValue());
+ if (maps_right == NULL) return;
+ MapSet intersection = maps_left->Intersect(maps_right, phase_->zone());
+ if (intersection->size() > 0) return;
+
+ TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
+ instr->id(), instr->block()->block_id()));
+ int succ = 1;
+ instr->set_known_successor_index(succ);
+
+ int unreachable_succ = 1 - succ;
+ instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
@@ -422,11 +568,17 @@ class HCheckTable : public ZoneObject {
cursor_ = size_; // Move cursor to end.
}
- void Print() {
- for (int i = 0; i < size_; i++) {
- HCheckTableEntry* entry = &entries_[i];
+ static void Print(HCheckTable* table) {
+ if (table == NULL) {
+ PrintF(" unreachable\n");
+ return;
+ }
+
+ for (int i = 0; i < table->size_; i++) {
+ HCheckTableEntry* entry = &table->entries_[i];
ASSERT(entry->object_ != NULL);
- PrintF(" checkmaps-table @%d: object #%d ", i, entry->object_->id());
+ PrintF(" checkmaps-table @%d: %s #%d ", i,
+ entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
if (entry->check_ != NULL) {
PrintF("check #%d ", entry->check_->id());
}
@@ -440,7 +592,6 @@ class HCheckTable : public ZoneObject {
}
}
- private:
HCheckTableEntry* Find(HValue* object) {
for (int i = size_ - 1; i >= 0; i--) {
// Search from most-recently-inserted to least-recently-inserted.
@@ -456,13 +607,13 @@ class HCheckTable : public ZoneObject {
return entry == NULL ? NULL : entry->maps_;
}
- void Insert(HValue* object, Unique<Map> map) {
+ void Insert(HValue* object, HInstruction* check, Unique<Map> map) {
MapSet list = new(phase_->zone()) UniqueSet<Map>();
list->Add(map, phase_->zone());
- Insert(object, NULL, list);
+ Insert(object, check, list);
}
- void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ void Insert(HValue* object, HInstruction* check, MapSet maps) {
HCheckTableEntry* entry = &entries_[cursor_++];
entry->object_ = object;
entry->check_ = check;
@@ -481,6 +632,7 @@ class HCheckTable : public ZoneObject {
}
friend class HCheckMapsEffects;
+ friend class HCheckEliminationPhase;
HCheckEliminationPhase* phase_;
HCheckTableEntry entries_[kMaxTrackedObjects];
@@ -514,8 +666,8 @@ class HCheckMapsEffects : public ZoneObject {
maps_stored_ = true;
}
default: {
- maps_stored_ |= (instr->CheckGVNFlag(kChangesMaps) |
- instr->CheckGVNFlag(kChangesElementsKind));
+ maps_stored_ |= (instr->CheckChangesFlag(kMaps) |
+ instr->CheckChangesFlag(kElementsKind));
}
}
}
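
Check elimination above tracks, per object, the set of maps it may still have: a second CheckMaps narrows that set by intersection (and may be replaced by a strengthened earlier check), and ReduceCompareObjectEqAndBranch folds the comparison to false when two objects' map sets are disjoint. A small sketch of that set logic, with std::set<int> standing in for UniqueSet<Map> (names hypothetical):

#include <algorithm>
#include <iterator>
#include <set>

using MapSet = std::set<int>;  // stand-in for UniqueSet<Map>

// Narrowing step of a redundant CheckMaps: intersect what is already known
// about the object with what the new check demands.
MapSet Intersect(const MapSet& known, const MapSet& checked) {
  MapSet out;
  std::set_intersection(known.begin(), known.end(),
                        checked.begin(), checked.end(),
                        std::inserter(out, out.begin()));
  return out;
}

// CompareObjectEqAndBranch can only take the true branch if the two objects
// can share at least one map; a disjoint intersection proves it false.
bool MayBeEqual(const MapSet& left, const MapSet& right) {
  return !Intersect(left, right).empty();
}
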
diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h
index fe786a5c5..99a2f841a 100644
--- a/deps/v8/src/hydrogen-flow-engine.h
+++ b/deps/v8/src/hydrogen-flow-engine.h
@@ -122,9 +122,10 @@ class HFlowEngine {
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
- State* state = StateAt(block);
+ State* state = State::Finish(StateAt(block), block, zone_);
if (block->IsReachable()) {
+ ASSERT(state != NULL);
if (block->IsLoopHeader()) {
// Apply loop effects before analyzing loop body.
ComputeLoopEffects(block)->Apply(state);
@@ -144,18 +145,14 @@ class HFlowEngine {
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
- if (StateAt(succ) == NULL) {
- // This is the first state to reach the successor.
- if (max == 1 && succ->predecessors()->length() == 1) {
- // Optimization: successor can inherit this state.
- SetStateAt(succ, state);
- } else {
- // Successor needs a copy of the state.
- SetStateAt(succ, state->Copy(succ, block, zone_));
- }
+
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
} else {
// Merge the current state with the state already at the successor.
- SetStateAt(succ, StateAt(succ)->Merge(succ, state, block, zone_));
+ SetStateAt(succ,
+ State::Merge(StateAt(succ), succ, state, block, zone_));
}
}
}
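
The flow-engine change above pushes the NULL/unreachable handling into static State::Merge and State::Finish hooks, so the engine no longer special-cases the first state to reach a successor. A stripped-down sketch of that contract with a hypothetical State type (the real hooks also take the blocks involved and a Zone):

struct State {
  int facts;  // placeholder for whatever the analysis tracks

  // Engine hook: combine the state already at a successor with the state
  // arriving along one more predecessor edge. NULL means "unreachable so far".
  static State* Merge(State* succ_state, State* pred_state) {
    if (pred_state == nullptr) return succ_state;   // edge adds nothing
    if (succ_state == nullptr) return pred_state;   // first reachable edge
    succ_state->facts &= pred_state->facts;         // keep only common facts
    return succ_state;
  }

  // Engine hook: called once all predecessor edges have been merged in.
  static State* Finish(State* state) {
    return state;  // a NULL here marks the block unreachable
  }
};
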
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index bc836890b..4c98015be 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -32,39 +32,39 @@
namespace v8 {
namespace internal {
-class HValueMap: public ZoneObject {
+class HInstructionMap V8_FINAL : public ZoneObject {
public:
- explicit HValueMap(Zone* zone)
+ HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
lists_size_(0),
count_(0),
- present_flags_(0),
array_(NULL),
lists_(NULL),
- free_list_head_(kNil) {
+ free_list_head_(kNil),
+ side_effects_tracker_(side_effects_tracker) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Add(HValue* value, Zone* zone) {
- present_flags_.Add(value->gvn_flags());
- Insert(value, zone);
+ void Add(HInstruction* instr, Zone* zone) {
+ present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
+ Insert(instr, zone);
}
- HValue* Lookup(HValue* value) const;
+ HInstruction* Lookup(HInstruction* instr) const;
- HValueMap* Copy(Zone* zone) const {
- return new(zone) HValueMap(zone, this);
+ HInstructionMap* Copy(Zone* zone) const {
+ return new(zone) HInstructionMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
- // A linked list of HValue* values. Stored in arrays.
- struct HValueMapListElement {
- HValue* value;
+ // A linked list of HInstruction* values. Stored in arrays.
+ struct HInstructionMapListElement {
+ HInstruction* instr;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
@@ -72,34 +72,36 @@ class HValueMap: public ZoneObject {
// Must be a power of 2.
static const int kInitialSize = 16;
- HValueMap(Zone* zone, const HValueMap* other);
+ HInstructionMap(Zone* zone, const HInstructionMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
- void Insert(HValue* value, Zone* zone);
+ void Insert(HInstruction* instr, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
- int count_; // The number of values stored in the HValueMap.
- GVNFlagSet present_flags_; // All flags that are in any value in the
- // HValueMap.
- HValueMapListElement* array_; // Primary store - contains the first value
+ int count_; // The number of values stored in the HInstructionMap.
+ SideEffects present_depends_on_;
+ HInstructionMapListElement* array_;
+ // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
- HValueMapListElement* lists_; // The linked lists containing hash collisions.
+ HInstructionMapListElement* lists_;
+ // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
+ SideEffectsTracker* side_effects_tracker_;
};
-class HSideEffectMap BASE_EMBEDDED {
+class HSideEffectMap V8_FINAL BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
- void Kill(GVNFlagSet flags);
+ void Kill(SideEffects side_effects);
- void Store(GVNFlagSet flags, HInstruction* instr);
+ void Store(SideEffects side_effects, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
@@ -152,35 +154,36 @@ void TraceGVN(const char* msg, ...) {
}
-HValueMap::HValueMap(Zone* zone, const HValueMap* other)
+HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
- present_flags_(other->present_flags_),
- array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
- lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
- free_list_head_(other->free_list_head_) {
+ present_depends_on_(other->present_depends_on_),
+ array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
+ lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
+ free_list_head_(other->free_list_head_),
+ side_effects_tracker_(other->side_effects_tracker_) {
OS::MemCopy(
- array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ array_, other->array_, array_size_ * sizeof(HInstructionMapListElement));
OS::MemCopy(
- lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ lists_, other->lists_, lists_size_ * sizeof(HInstructionMapListElement));
}
-void HValueMap::Kill(GVNFlagSet flags) {
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if (!present_flags_.ContainsAnyOf(depends_flags)) return;
- present_flags_.RemoveAll();
+void HInstructionMap::Kill(SideEffects changes) {
+ if (!present_depends_on_.ContainsAnyOf(changes)) return;
+ present_depends_on_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
- HValue* value = array_[i].value;
- if (value != NULL) {
+ HInstruction* instr = array_[i].instr;
+ if (instr != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- HValue* value = lists_[current].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+ HInstruction* instr = lists_[current].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -189,40 +192,41 @@ void HValueMap::Kill(GVNFlagSet flags) {
// Keep it.
lists_[current].next = kept;
kept = current;
- present_flags_.Add(value->gvn_flags());
+ present_depends_on_.Add(depends_on);
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- value = array_[i].value;
- if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
+ instr = array_[i].instr;
+ SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+ if (depends_on.ContainsAnyOf(changes)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
- array_[i].value = NULL;
+ array_[i].instr = NULL;
} else {
- array_[i].value = lists_[head].value;
+ array_[i].instr = lists_[head].instr;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
- present_flags_.Add(value->gvn_flags()); // Keep it.
+ present_depends_on_.Add(depends_on); // Keep it.
}
}
}
}
-HValue* HValueMap::Lookup(HValue* value) const {
- uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
+ uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
uint32_t pos = Bound(hash);
- if (array_[pos].value != NULL) {
- if (array_[pos].value->Equals(value)) return array_[pos].value;
+ if (array_[pos].instr != NULL) {
+ if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
int next = array_[pos].next;
while (next != kNil) {
- if (lists_[next].value->Equals(value)) return lists_[next].value;
+ if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
next = lists_[next].next;
}
}
@@ -230,7 +234,7 @@ HValue* HValueMap::Lookup(HValue* value) const {
}
-void HValueMap::Resize(int new_size, Zone* zone) {
+void HInstructionMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
@@ -240,33 +244,33 @@ void HValueMap::Resize(int new_size, Zone* zone) {
ResizeLists(lists_size_ << 1, zone);
}
- HValueMapListElement* new_array =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_array =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_array = array_;
+ HInstructionMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
- // Do not modify present_flags_. It is currently correct.
+ // Do not modify present_depends_on_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
- if (old_array[i].value != NULL) {
+ if (old_array[i].instr != NULL) {
int current = old_array[i].next;
while (current != kNil) {
- Insert(lists_[current].value, zone);
+ Insert(lists_[current].instr, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
- // Rehash the directly stored value.
- Insert(old_array[i].value, zone);
+ // Rehash the directly stored instruction.
+ Insert(old_array[i].instr, zone);
}
}
}
@@ -275,21 +279,22 @@ void HValueMap::Resize(int new_size, Zone* zone) {
}
-void HValueMap::ResizeLists(int new_size, Zone* zone) {
+void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
- HValueMapListElement* new_lists =
- zone->NewArray<HValueMapListElement>(new_size);
- memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+ HInstructionMapListElement* new_lists =
+ zone->NewArray<HInstructionMapListElement>(new_size);
+ memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
- HValueMapListElement* old_lists = lists_;
+ HInstructionMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
- OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -298,15 +303,15 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
}
-void HValueMap::Insert(HValue* value, Zone* zone) {
- ASSERT(value != NULL);
+void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
+ ASSERT(instr != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
- uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
- if (array_[pos].value == NULL) {
- array_[pos].value = value;
+ uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
+ if (array_[pos].instr == NULL) {
+ array_[pos].instr = instr;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
@@ -315,9 +320,9 @@ void HValueMap::Insert(HValue* value, Zone* zone) {
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
- lists_[new_element_pos].value = value;
+ lists_[new_element_pos].instr = instr;
lists_[new_element_pos].next = array_[pos].next;
- ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+ ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
array_[pos].next = new_element_pos;
}
}
@@ -341,10 +346,9 @@ HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
}
-void HSideEffectMap::Kill(GVNFlagSet flags) {
+void HSideEffectMap::Kill(SideEffects side_effects) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
@@ -352,10 +356,9 @@ void HSideEffectMap::Kill(GVNFlagSet flags) {
}
-void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- if (flags.Contains(changes_flag)) {
+ if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
@@ -363,6 +366,152 @@ void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
}
+SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->ChangesFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsStoreGlobalCell() &&
+ ComputeGlobalVar(HStoreGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsStoreNamedField() &&
+ ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
+ int index;
+ SideEffects result(instr->DependsOnFlags());
+ if (result.ContainsFlag(kGlobalVars)) {
+ if (instr->IsLoadGlobalCell() &&
+ ComputeGlobalVar(HLoadGlobalCell::cast(instr)->cell(), &index)) {
+ result.RemoveFlag(kGlobalVars);
+ result.AddSpecial(GlobalVar(index));
+ } else {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ }
+ }
+ if (result.ContainsFlag(kInobjectFields)) {
+ if (instr->IsLoadNamedField() &&
+ ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
+ result.RemoveFlag(kInobjectFields);
+ result.AddSpecial(InobjectField(index));
+ } else {
+ for (index = 0; index < kNumberOfInobjectFields; ++index) {
+ result.AddSpecial(InobjectField(index));
+ }
+ }
+ }
+ return result;
+}
+
+
+void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
+ SideEffects side_effects) const {
+ const char* separator = "";
+ stream->Add("[");
+ for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+ GVNFlag flag = GVNFlagFromInt(bit);
+ if (side_effects.ContainsFlag(flag)) {
+ stream->Add(separator);
+ separator = ", ";
+ switch (flag) {
+#define DECLARE_FLAG(Type) \
+ case k##Type: \
+ stream->Add(#Type); \
+ break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ default:
+ break;
+ }
+ }
+ }
+ for (int index = 0; index < num_global_vars_; ++index) {
+ if (side_effects.ContainsSpecial(GlobalVar(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ stream->Add("[%p]", *global_vars_[index].handle());
+ }
+ }
+ for (int index = 0; index < num_inobject_fields_; ++index) {
+ if (side_effects.ContainsSpecial(InobjectField(index))) {
+ stream->Add(separator);
+ separator = ", ";
+ inobject_fields_[index].PrintTo(stream);
+ }
+ }
+ stream->Add("]");
+}
+
+
+bool SideEffectsTracker::ComputeGlobalVar(Unique<Cell> cell, int* index) {
+ for (int i = 0; i < num_global_vars_; ++i) {
+ if (cell == global_vars_[i]) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_global_vars_ < kNumberOfGlobalVars) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking global var [%p] (mapped to index %d)\n",
+ *cell.handle(), num_global_vars_);
+ stream.OutputToStdOut();
+ }
+ *index = num_global_vars_;
+ global_vars_[num_global_vars_++] = cell;
+ return true;
+ }
+ return false;
+}
+
+
+bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
+ int* index) {
+ for (int i = 0; i < num_inobject_fields_; ++i) {
+ if (access.Equals(inobject_fields_[i])) {
+ *index = i;
+ return true;
+ }
+ }
+ if (num_inobject_fields_ < kNumberOfInobjectFields) {
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Tracking inobject field access ");
+ access.PrintTo(&stream);
+ stream.Add(" (mapped to index %d)\n", num_inobject_fields_);
+ stream.OutputToStdOut();
+ }
+ *index = num_inobject_fields_;
+ inobject_fields_[num_inobject_fields_++] = access;
+ return true;
+ }
+ return false;
+}
+
+
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
: HPhase("H_Global value numbering", graph),
removed_side_effects_(false),
@@ -370,10 +519,10 @@ HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
loop_side_effects_(graph->blocks()->length(), zone()),
visited_on_paths_(graph->blocks()->length(), zone()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
- block_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
- loop_side_effects_.AddBlock(GVNFlagSet(), graph->blocks()->length(),
- zone());
+ block_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
+ loop_side_effects_.AddBlock(
+ SideEffects(), graph->blocks()->length(), zone());
}
@@ -409,12 +558,12 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- GVNFlagSet side_effects;
+ SideEffects side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
+ side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
}
block_side_effects_[id].Add(side_effects);
@@ -438,103 +587,22 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
}
-SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
- char underlying_buffer[kNumberOfFlags * 128];
- Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
-#if DEBUG
- int offset = 0;
- const char* separator = "";
- const char* comma = ", ";
- buffer[0] = 0;
- uint32_t set_depends_on = 0;
- uint32_t set_changes = 0;
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit))) {
- if (bit % 2 == 0) {
- set_changes++;
- } else {
- set_depends_on++;
- }
- }
- }
- bool positive_changes = set_changes < (kNumberOfFlags / 2);
- bool positive_depends_on = set_depends_on < (kNumberOfFlags / 2);
- if (set_changes > 0) {
- if (positive_changes) {
- offset += OS::SNPrintF(buffer + offset, "changes [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "changes all except [");
- }
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_changes) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kChanges##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
- if (set_depends_on > 0) {
- separator = "";
- if (set_changes > 0) {
- offset += OS::SNPrintF(buffer + offset, ", ");
- }
- if (positive_depends_on) {
- offset += OS::SNPrintF(buffer + offset, "depends on [");
- } else {
- offset += OS::SNPrintF(buffer + offset, "depends on all except [");
- }
- for (int bit = 0; bit < kNumberOfFlags; ++bit) {
- if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_depends_on) {
- switch (static_cast<GVNFlag>(bit)) {
-#define DECLARE_FLAG(type) \
- case kDependsOn##type: \
- offset += OS::SNPrintF(buffer + offset, separator); \
- offset += OS::SNPrintF(buffer + offset, #type); \
- separator = comma; \
- break;
-GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
-GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
- default:
- break;
- }
- }
- }
- offset += OS::SNPrintF(buffer + offset, "]");
- }
-#else
- OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
-#endif
- size_t string_len = strlen(underlying_buffer) + 1;
- ASSERT(string_len <= sizeof(underlying_buffer));
- char* result = new char[strlen(underlying_buffer) + 1];
- OS::MemCopy(result, underlying_buffer, string_len);
- return SmartArrayPointer<char>(result);
-}
-
-
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
- GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
- TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
- block->block_id(),
- GetGVNFlagsString(side_effects).get());
-
+ SideEffects side_effects = loop_side_effects_[block->block_id()];
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Try loop invariant motion for block B%d changes ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
@@ -547,22 +615,37 @@ void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
- GVNFlagSet loop_kills) {
+ SideEffects loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
- TRACE_GVN_2("Loop invariant motion for B%d %s\n",
- block->block_id(),
- GetGVNFlagsString(depends_flags).get());
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Loop invariant code motion for B%d depends on ",
+ block->block_id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
if (instr->CheckFlag(HValue::kUseGVN)) {
- TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
- instr->id(),
- instr->Mnemonic(),
- GetGVNFlagsString(instr->gvn_flags()).get(),
- GetGVNFlagsString(loop_kills).get());
- bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Checking instruction i%d (%s) changes ",
+ instr->id(), instr->Mnemonic());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add(", depends on ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
+ stream.Add(". Loop changes ");
+ side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
+ bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
@@ -604,10 +687,10 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
}
-GVNFlagSet
+SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- GVNFlagSet side_effects;
+ SideEffects side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
@@ -636,13 +719,13 @@ class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
- HValueMap* entry_map) {
+ HInstructionMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
- HValueMap* map() { return map_; }
+ HInstructionMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
@@ -669,7 +752,7 @@ class GvnBasicBlockState: public ZoneObject {
private:
void Initialize(HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
@@ -685,7 +768,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
- HValueMap* map,
+ HInstructionMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
@@ -732,7 +815,7 @@ class GvnBasicBlockState: public ZoneObject {
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
- HValueMap* map_;
+ HInstructionMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
@@ -745,13 +828,14 @@ class GvnBasicBlockState: public ZoneObject {
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
- HValueMap* entry_map = new(zone()) HValueMap(zone());
+ HInstructionMap* entry_map =
+ new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
- HValueMap* map = current->map();
+ HInstructionMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
@@ -770,17 +854,15 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
- GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
- GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
- if (instr->DependsOnFlags().Contains(depends_on_flag) &&
- (other != NULL)) {
+ GVNFlag flag = GVNFlagFromInt(i);
+ if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
- if (instr->HandleSideEffectDominator(changes_flag, other)) {
+ if (instr->HandleSideEffectDominator(flag, other)) {
removed_side_effects_ = true;
}
}
@@ -789,21 +871,27 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
- GVNFlagSet flags = instr->ChangesFlags();
- if (!flags.IsEmpty()) {
+ SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+ if (!changes.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
- map->Kill(flags);
- dominators->Store(flags, instr);
- TRACE_GVN_2("Instruction %d %s\n", instr->id(),
- GetGVNFlagsString(flags).get());
+ map->Kill(changes);
+ dominators->Store(changes, instr);
+ if (FLAG_trace_gvn) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ stream.Add("Instruction i%d changes ", instr->id());
+ side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
+ stream.Add("\n");
+ stream.OutputToStdOut();
+ }
}
if (instr->CheckFlag(HValue::kUseGVN)) {
ASSERT(!instr->HasObservableSideEffects());
- HValue* other = map->Lookup(instr);
+ HInstruction* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
- TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+ TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
@@ -823,7 +911,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if (next != NULL) {
HBasicBlock* dominated = next->block();
- HValueMap* successor_map = next->map();
+ HInstructionMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
@@ -834,7 +922,7 @@ void HGlobalValueNumberingPhase::AnalyzeGraph() {
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
- GVNFlagSet side_effects_on_all_paths =
+ SideEffects side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index 30333cca6..d00dd0558 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -36,15 +36,97 @@
namespace v8 {
namespace internal {
+// This class extends GVNFlagSet with additional "special" dynamic side effects,
+// which can be used to represent side effects that cannot be expressed using
+// the GVNFlags of an HInstruction. These special side effects are tracked by a
+// SideEffectsTracker (see below).
+class SideEffects V8_FINAL {
+ public:
+ static const int kNumberOfSpecials = 64 - kNumberOfFlags;
+
+ SideEffects() : bits_(0) {
+ ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+ }
+ explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
+ bool IsEmpty() const { return bits_ == 0; }
+ bool ContainsFlag(GVNFlag flag) const {
+ return (bits_ & MaskFlag(flag)) != 0;
+ }
+ bool ContainsSpecial(int special) const {
+ return (bits_ & MaskSpecial(special)) != 0;
+ }
+ bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
+ void Add(SideEffects set) { bits_ |= set.bits_; }
+ void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
+ void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
+ void RemoveAll() { bits_ = 0; }
+ uint64_t ToIntegral() const { return bits_; }
+ void PrintTo(StringStream* stream) const;
+
+ private:
+ uint64_t MaskFlag(GVNFlag flag) const {
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
+ }
+ uint64_t MaskSpecial(int special) const {
+ ASSERT(special >= 0);
+ ASSERT(special < kNumberOfSpecials);
+ return static_cast<uint64_t>(1) << static_cast<unsigned>(
+ special + kNumberOfFlags);
+ }
+
+ uint64_t bits_;
+};
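A minimal standalone sketch of the bit layout used by SideEffects above: the
low kNumberOfFlags bits carry the static GVN flags and the remaining bits of
the 64-bit word carry the dynamically assigned "special" effects. The flag
count below is an assumed placeholder, since the real value is generated from
the GVN flag lists.

    #include <stdint.h>
    #include <assert.h>

    static const int kSketchNumberOfFlags = 20;  // placeholder, not the real count
    static const int kSketchNumberOfSpecials = 64 - kSketchNumberOfFlags;

    static uint64_t SketchFlagMask(int flag) {
      assert(flag >= 0 && flag < kSketchNumberOfFlags);
      return static_cast<uint64_t>(1) << flag;
    }

    static uint64_t SketchSpecialMask(int special) {
      assert(special >= 0 && special < kSketchNumberOfSpecials);
      // Specials live above the static flags in the same 64-bit word.
      return static_cast<uint64_t>(1) << (special + kSketchNumberOfFlags);
    }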
+
+
+// Tracks global variable and inobject field loads/stores in a fine-grained
+// fashion, and represents them using the "special" dynamic side effects of the
+// SideEffects class (see above). This way unrelated global variable/inobject
+// field stores don't prevent hoisting and merging of global variable/inobject
+// field loads.
+class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+ public:
+ SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
+ SideEffects ComputeChanges(HInstruction* instr);
+ SideEffects ComputeDependsOn(HInstruction* instr);
+ void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
+
+ private:
+ bool ComputeGlobalVar(Unique<Cell> cell, int* index);
+ bool ComputeInobjectField(HObjectAccess access, int* index);
+
+ static int GlobalVar(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfGlobalVars);
+ return index;
+ }
+ static int InobjectField(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < kNumberOfInobjectFields);
+ return index + kNumberOfGlobalVars;
+ }
+
+ // Track up to four global vars.
+ static const int kNumberOfGlobalVars = 4;
+ Unique<Cell> global_vars_[kNumberOfGlobalVars];
+ int num_global_vars_;
+
+ // Track up to n inobject fields.
+ static const int kNumberOfInobjectFields =
+ SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
+ HObjectAccess inobject_fields_[kNumberOfInobjectFields];
+ int num_inobject_fields_;
+};
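A rough sketch of the index hand-out scheme described above: the first
kNumberOfGlobalVars specials are reserved for global cells and the rest for
inobject fields, with indices assigned on first use. The key types below are
stand-ins for Unique<Cell>/HObjectAccess, and the fallback behaviour in the
comments is an assumption, not the patch's code.

    #include <vector>

    struct SketchTracker {
      static const int kGlobalVars = 4;       // mirrors kNumberOfGlobalVars above
      static const int kInobjectFields = 60;  // assumed: kNumberOfSpecials - kGlobalVars
      std::vector<int> global_keys;           // stand-in for Unique<Cell> entries
      std::vector<int> field_keys;            // stand-in for HObjectAccess entries

      // Returns the special index for a global cell, assigning one on first
      // use; returns -1 once the per-kind budget is exhausted, at which point
      // a tracker like this has to fall back to coarser, untracked effects.
      int GlobalVarIndex(int key) {
        for (size_t i = 0; i < global_keys.size(); ++i) {
          if (global_keys[i] == key) return static_cast<int>(i);
        }
        if (static_cast<int>(global_keys.size()) >= kGlobalVars) return -1;
        global_keys.push_back(key);
        return static_cast<int>(global_keys.size()) - 1;
      }

      // Inobject fields use the specials after the global-var block.
      int InobjectFieldIndex(int key) {
        for (size_t i = 0; i < field_keys.size(); ++i) {
          if (field_keys[i] == key) return kGlobalVars + static_cast<int>(i);
        }
        if (static_cast<int>(field_keys.size()) >= kInobjectFields) return -1;
        field_keys.push_back(key);
        return kGlobalVars + static_cast<int>(field_keys.size()) - 1;
      }
    };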
+
+
// Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase : public HPhase {
+class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
void Run();
private:
- GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ SideEffects CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
@@ -52,17 +134,18 @@ class HGlobalValueNumberingPhase : public HPhase {
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- GVNFlagSet loop_kills);
+ SideEffects loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+ SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<GVNFlagSet> block_side_effects_;
+ ZoneList<SideEffects> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<GVNFlagSet> loop_side_effects_;
+ ZoneList<SideEffects> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -71,7 +154,6 @@ class HGlobalValueNumberingPhase : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 2ca0c54a5..84dcb1824 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -30,11 +30,14 @@
#include "double.h"
#include "factory.h"
#include "hydrogen-infer-representation.h"
+#include "property-details-inl.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -604,11 +607,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags.Contains(kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
+#define PRINT_DO(Type) \
+ if (changes_flags.Contains(k##Type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#Type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -680,6 +683,19 @@ void HValue::ComputeInitialRange(Zone* zone) {
}
+void HSourcePosition::PrintTo(FILE* out) {
+ if (IsUnknown()) {
+ PrintF(out, "<?>");
+ } else {
+ if (FLAG_hydrogen_track_positions) {
+ PrintF(out, "<%d:%d>", inlining_id(), position());
+ } else {
+ PrintF(out, "<0:%d>", raw());
+ }
+ }
+}
+
+
void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
@@ -736,8 +752,7 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
- if (position() == RelocInfo::kNoPosition &&
- next->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && next->has_position()) {
set_position(next->position());
}
}
@@ -774,8 +789,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
- if (position() == RelocInfo::kNoPosition &&
- previous->position() != RelocInfo::kNoPosition) {
+ if (!has_position() && previous->has_position()) {
set_position(previous->position());
}
}
@@ -827,6 +841,107 @@ void HInstruction::Verify() {
#endif
+static bool HasPrimitiveRepresentation(HValue* instr) {
+ return instr->representation().IsInteger32() ||
+ instr->representation().IsDouble();
+}
+
+
+bool HInstruction::CanDeoptimize() {
+ // TODO(titzer): make this a virtual method?
+ switch (opcode()) {
+ case HValue::kAccessArgumentsAt:
+ case HValue::kApplyArguments:
+ case HValue::kArgumentsElements:
+ case HValue::kArgumentsLength:
+ case HValue::kArgumentsObject:
+ case HValue::kBoundsCheckBaseIndexInformation:
+ case HValue::kCapturedObject:
+ case HValue::kClampToUint8:
+ case HValue::kConstant:
+ case HValue::kContext:
+ case HValue::kDateField:
+ case HValue::kDebugBreak:
+ case HValue::kDeclareGlobals:
+ case HValue::kDiv:
+ case HValue::kDummyUse:
+ case HValue::kEnterInlined:
+ case HValue::kEnvironmentMarker:
+ case HValue::kForInCacheArray:
+ case HValue::kForInPrepareMap:
+ case HValue::kFunctionLiteral:
+ case HValue::kGetCachedArrayIndex:
+ case HValue::kGoto:
+ case HValue::kInnerAllocatedObject:
+ case HValue::kInstanceOf:
+ case HValue::kInstanceOfKnownGlobal:
+ case HValue::kInvokeFunction:
+ case HValue::kLeaveInlined:
+ case HValue::kLoadContextSlot:
+ case HValue::kLoadFieldByIndex:
+ case HValue::kLoadFunctionPrototype:
+ case HValue::kLoadGlobalCell:
+ case HValue::kLoadGlobalGeneric:
+ case HValue::kLoadKeyed:
+ case HValue::kLoadKeyedGeneric:
+ case HValue::kLoadNamedField:
+ case HValue::kLoadNamedGeneric:
+ case HValue::kLoadRoot:
+ case HValue::kMapEnumLength:
+ case HValue::kMathFloorOfDiv:
+ case HValue::kMathMinMax:
+ case HValue::kMod:
+ case HValue::kMul:
+ case HValue::kOsrEntry:
+ case HValue::kParameter:
+ case HValue::kPower:
+ case HValue::kPushArgument:
+ case HValue::kRor:
+ case HValue::kSar:
+ case HValue::kSeqStringGetChar:
+ case HValue::kSeqStringSetChar:
+ case HValue::kShl:
+ case HValue::kShr:
+ case HValue::kSimulate:
+ case HValue::kStackCheck:
+ case HValue::kStoreCodeEntry:
+ case HValue::kStoreContextSlot:
+ case HValue::kStoreGlobalCell:
+ case HValue::kStoreKeyed:
+ case HValue::kStoreKeyedGeneric:
+ case HValue::kStoreNamedField:
+ case HValue::kStoreNamedGeneric:
+ case HValue::kStringAdd:
+ case HValue::kStringCharCodeAt:
+ case HValue::kStringCharFromCode:
+ case HValue::kSub:
+ case HValue::kThisFunction:
+ case HValue::kToFastProperties:
+ case HValue::kTransitionElementsKind:
+ case HValue::kTrapAllocationMemento:
+ case HValue::kTypeof:
+ case HValue::kUnaryMathOperation:
+ case HValue::kUseConst:
+ case HValue::kWrapReceiver:
+ return false;
+ case HValue::kForceRepresentation:
+ case HValue::kAdd:
+ case HValue::kBitwise:
+ case HValue::kChange:
+ case HValue::kCompareGeneric:
+ // These instructions might deoptimize if they are not primitive.
+ if (!HasPrimitiveRepresentation(this)) return true;
+ for (int i = 0; i < OperandCount(); i++) {
+ HValue* input = OperandAt(i);
+ if (!HasPrimitiveRepresentation(input)) return true;
+ }
+ return false;
+ default:
+ return true;
+ }
+}
+
+
void HDummyUse::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -1134,6 +1249,7 @@ const char* HUnaryMathOperation::OpName() const {
case kMathExp: return "exp";
case kMathSqrt: return "sqrt";
case kMathPowHalf: return "pow-half";
+ case kMathClz32: return "clz32";
default:
UNREACHABLE();
return NULL;
@@ -1143,6 +1259,7 @@ const char* HUnaryMathOperation::OpName() const {
Range* HUnaryMathOperation::InferRange(Zone* zone) {
Representation r = representation();
+ if (op() == kMathClz32) return new(zone) Range(0, 32);
if (r.IsSmiOrInteger32() && value()->HasRange()) {
if (op() == kMathAbs) {
int upper = value()->range()->upper();
@@ -1200,18 +1317,52 @@ void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" == %o", *type_literal_);
+ stream->Add(" == %o", *type_literal_.handle());
HControlInstruction::PrintDataTo(stream);
}
-bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (value()->representation().IsSpecialization()) {
- if (compares_number_type()) {
- *block = FirstSuccessor();
- } else {
- *block = SecondSuccessor();
+static String* TypeOfString(HConstant* constant, Isolate* isolate) {
+ Heap* heap = isolate->heap();
+ if (constant->HasNumberValue()) return heap->number_string();
+ if (constant->IsUndetectable()) return heap->undefined_string();
+ if (constant->HasStringValue()) return heap->string_string();
+ switch (constant->GetInstanceType()) {
+ case ODDBALL_TYPE: {
+ Unique<Object> unique = constant->GetUnique();
+ if (unique.IsKnownGlobal(heap->true_value()) ||
+ unique.IsKnownGlobal(heap->false_value())) {
+ return heap->boolean_string();
+ }
+ if (unique.IsKnownGlobal(heap->null_value())) {
+ return FLAG_harmony_typeof ? heap->null_string()
+ : heap->object_string();
+ }
+ ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
+ return heap->undefined_string();
}
+ case SYMBOL_TYPE:
+ return heap->symbol_string();
+ case JS_FUNCTION_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return heap->function_string();
+ default:
+ return heap->object_string();
+ }
+}
+
+
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ String* type_string = TypeOfString(constant, isolate());
+ bool same_type = type_literal_.IsKnownGlobal(type_string);
+ *block = same_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsSpecialization()) {
+ bool number_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
+ *block = number_type ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -1384,19 +1535,19 @@ void HTypeof::PrintDataTo(StringStream* stream) {
HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
- HValue* value, Representation required_representation) {
+ HValue* value, Representation representation) {
if (FLAG_fold_constants && value->IsConstant()) {
HConstant* c = HConstant::cast(value);
if (c->HasNumberValue()) {
double double_res = c->DoubleValue();
- if (IsInt32Double(double_res)) {
+ if (representation.CanContainDouble(double_res)) {
return HConstant::New(zone, context,
static_cast<int32_t>(double_res),
- required_representation);
+ representation);
}
}
}
- return new(zone) HForceRepresentation(value, required_representation);
+ return new(zone) HForceRepresentation(value, representation);
}
@@ -1516,7 +1667,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesMaps);
+ ASSERT(side_effect == kMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
// for which the map is known.
@@ -1524,7 +1675,7 @@ bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HStoreNamedField* store = HStoreNamedField::cast(dominator);
if (!store->has_transition() || store->object() != value()) return false;
HConstant* transition = HConstant::cast(store->transition());
- if (map_set_.Contains(transition->GetUnique())) {
+ if (map_set_.Contains(Unique<Map>::cast(transition->GetUnique()))) {
DeleteAndReplaceWith(NULL);
return true;
}
@@ -1552,9 +1703,7 @@ void HCheckValue::PrintDataTo(StringStream* stream) {
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->GetUnique() == object_)
- ? NULL
- : this;
+ HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
}
@@ -1624,7 +1773,17 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
- ClearGVNFlag(kChangesNewSpacePromotion);
+ ClearChangesFlag(kNewSpacePromotion);
+ }
+ if (to().IsSmiOrTagged() &&
+ input_range != NULL &&
+ input_range->IsInSmiRange() &&
+ (!SmiValuesAre32Bits() ||
+ !value()->CheckFlag(HValue::kUint32) ||
+ input_range->upper() != kMaxInt)) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ ClearFlag(kCanOverflow);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1647,7 +1806,7 @@ Range* HConstant::InferRange(Zone* zone) {
}
-int HPhi::position() const {
+HSourcePosition HPhi::position() const {
return block()->first()->position();
}
@@ -1750,11 +1909,45 @@ Range* HDiv::InferRange(Zone* zone) {
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
- ClearFlag(HValue::kCanOverflow);
+ ClearFlag(kCanOverflow);
}
if (!b->CanBeZero()) {
- ClearFlag(HValue::kCanBeDivByZero);
+ ClearFlag(kCanBeDivByZero);
+ }
+ return result;
+ } else {
+ return HValue::InferRange(zone);
+ }
+}
+
+
+Range* HMathFloorOfDiv::InferRange(Zone* zone) {
+ if (representation().IsInteger32()) {
+ Range* a = left()->range();
+ Range* b = right()->range();
+ Range* result = new(zone) Range();
+ result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+ (a->CanBeMinusZero() ||
+ (a->CanBeZero() && b->CanBeNegative())));
+ if (!a->Includes(kMinInt)) {
+ ClearFlag(kLeftCanBeMinInt);
+ }
+
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
+ if (!a->CanBePositive()) {
+ ClearFlag(HValue::kLeftCanBePositive);
+ }
+
+ if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ ClearFlag(kCanOverflow);
+ }
+
+ if (!b->CanBeZero()) {
+ ClearFlag(kCanBeDivByZero);
}
return result;
} else {
@@ -1781,6 +1974,10 @@ Range* HMod::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
left_can_be_negative);
+ if (!a->CanBeNegative()) {
+ ClearFlag(HValue::kLeftCanBeNegative);
+ }
+
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}
@@ -2487,13 +2684,16 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(handle->BooleanValue()) {
+ boolean_value_(handle->BooleanValue()),
+ is_undetectable_(false),
+ instance_type_(kUnknownInstanceType) {
if (handle->IsHeapObject()) {
- Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+ Handle<HeapObject> heap_obj = Handle<HeapObject>::cast(handle);
+ Heap* heap = heap_obj->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
+ instance_type_ = heap_obj->map()->instance_type();
+ is_undetectable_ = heap_obj->map()->is_undetectable();
}
if (handle->IsNumber()) {
double n = handle->Number();
@@ -2503,12 +2703,8 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double_value_ = n;
has_double_value_ = true;
// TODO(titzer): if this heap number is new space, tenure a new one.
- } else {
- is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle.is_null() &&
- (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
@@ -2516,20 +2712,20 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
HConstant::HConstant(Unique<Object> unique,
Representation r,
HType type,
- bool is_internalize_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value)
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type)
: HTemplateInstruction<0>(type),
object_(unique),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(false),
- is_internalized_string_(is_internalize_string),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(is_cell),
- boolean_value_(boolean_value) {
+ boolean_value_(boolean_value),
+ is_undetectable_(is_undetectable),
+ instance_type_(instance_type) {
ASSERT(!unique.handle().is_null());
ASSERT(!type.IsTaggedNumber());
Initialize(r);
@@ -2545,12 +2741,12 @@ HConstant::HConstant(int32_t integer_value,
has_int32_value_(true),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(integer_value != 0),
+ is_undetectable_(false),
int32_value_(integer_value),
- double_value_(FastI2D(integer_value)) {
+ double_value_(FastI2D(integer_value)),
+ instance_type_(kUnknownInstanceType) {
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
@@ -2568,12 +2764,12 @@ HConstant::HConstant(double double_value,
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
- is_internalized_string_(false),
is_not_in_new_space_(is_not_in_new_space),
- is_cell_(false),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
+ is_undetectable_(false),
int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
+ double_value_(double_value),
+ instance_type_(kUnknownInstanceType) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
@@ -2591,11 +2787,11 @@ HConstant::HConstant(ExternalReference reference)
has_int32_value_(false),
has_double_value_(false),
has_external_reference_value_(true),
- is_internalized_string_(false),
is_not_in_new_space_(true),
- is_cell_(false),
boolean_value_(true),
- external_reference_value_(reference) {
+ is_undetectable_(false),
+ external_reference_value_(reference),
+ instance_type_(kUnknownInstanceType) {
Initialize(Representation::External());
}
@@ -2694,10 +2890,10 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
return new(zone) HConstant(object_,
r,
type_,
- is_internalized_string_,
is_not_in_new_space_,
- is_cell_,
- boolean_value_);
+ boolean_value_,
+ is_undetectable_,
+ instance_type_);
}
@@ -3011,12 +3207,70 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (left()->IsConstant() && right()->IsConstant()) {
- bool comparison_result =
- HConstant::cast(left())->DataEquals(HConstant::cast(right()));
- *block = comparison_result
- ? FirstSuccessor()
- : SecondSuccessor();
+ if (known_successor_index() != kNoKnownSuccessorIndex) {
+ *block = SuccessorAt(known_successor_index());
+ return true;
+ }
+ if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
+ *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
+ if (constant->HasNumberValue()) return false;
+ if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
+ return true;
+ }
+ if (constant->IsUndetectable()) return false;
+ InstanceType type = constant->GetInstanceType();
+ return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
+ (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+}
+
+
+bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = ConstantIsObject(HConstant::cast(value()), isolate())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->HasStringValue()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ *block = HConstant::cast(value())->IsUndetectable()
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ InstanceType type = HConstant::cast(value())->GetInstanceType();
+ *block = (from_ <= type) && (type <= to_)
+ ? FirstSuccessor() : SecondSuccessor();
return true;
}
*block = NULL;
@@ -3031,6 +3285,14 @@ void HCompareHoleAndBranch::InferRepresentation(
bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (FLAG_fold_constants && value()->IsConstant()) {
+ HConstant* constant = HConstant::cast(value());
+ if (constant->HasDoubleValue()) {
+ *block = IsMinusZero(constant->DoubleValue())
+ ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ }
+ }
if (value()->representation().IsSmiOrInteger32()) {
// A Smi or Integer32 cannot contain minus zero.
*block = SecondSuccessor();
@@ -3358,7 +3620,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
base_object()->PrintNameTo(stream);
- stream->Add(" offset %d", offset());
+ stream->Add(" offset ");
+ offset()->PrintTo(stream);
}
@@ -3422,7 +3685,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
Zone* zone = block()->zone();
if (!FLAG_use_allocation_folding) return false;
@@ -3435,6 +3698,15 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
+ // Check whether we are folding within the same block for local folding.
+ if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return false;
+ }
+
HAllocate* dominator_allocate = HAllocate::cast(dominator);
HValue* dominator_size = dominator_allocate->size();
HValue* current_size = size();
@@ -3683,99 +3955,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
-HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- if (representation().IsSmiOrInteger32() &&
- !value()->representation().Equals(representation())) {
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- }
- if (RequiredInputRepresentation(0).IsSmiOrInteger32() &&
- representation().Equals(RequiredInputRepresentation(0))) {
- return value();
- }
- return NULL;
-}
-
-
-HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (from().IsSmiOrInteger32()) return NULL;
- if (CanTruncateToInt32()) return NULL;
- if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- ASSERT(!from().IsSmiOrInteger32() || !to().IsSmiOrInteger32());
- return NULL;
-}
-
-
-HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
- BitVector* visited) {
- visited->Add(id());
- return value();
-}
-
-
-HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- return left();
- }
- return NULL;
-}
-
-
-HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- SetFlag(kBailoutOnMinusZero);
- return NULL;
-}
-
-
-HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- if (range() == NULL || range()->CanBeMinusZero()) {
- SetFlag(kBailoutOnMinusZero);
- }
- return NULL;
-}
-
-
-HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the add operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
-HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- // Propagate to the left argument. If the left argument cannot be -0, then
- // the result of the sub operation cannot be either.
- if (range() == NULL || range()->CanBeMinusZero()) {
- return left();
- }
- return NULL;
-}
-
-
bool HStoreKeyed::NeedsCanonicalization() {
// If value is an integer or smi or comes from the result of a keyed load or
// constant then it is either be a non-hole value or in the case of a constant
@@ -3846,9 +4025,15 @@ HInstruction* HStringAdd::New(Zone* zone,
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
- Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
- c_left->StringValue(), c_right->StringValue());
- return HConstant::New(zone, context, concat);
+ Handle<String> left_string = c_left->StringValue();
+ Handle<String> right_string = c_right->StringValue();
+      // Prevent a possible exception due to invalid string length.
+ if (left_string->length() + right_string->length() < String::kMaxLength) {
+ Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+ c_left->StringValue(), c_right->StringValue());
+ ASSERT(!concat.is_null());
+ return HConstant::New(zone, context, concat);
+ }
}
}
return new(zone) HStringAdd(
@@ -3864,6 +4049,7 @@ void HStringAdd::PrintDataTo(StringStream* stream) {
} else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
stream->Add("_CheckRight");
}
+ HBinaryOperation::PrintDataTo(stream);
stream->Add(" (");
if (pretenure_flag() == NOT_TENURED) stream->Add("N");
else if (pretenure_flag() == TENURED) stream->Add("D");
@@ -3913,6 +4099,8 @@ HInstruction* HUnaryMathOperation::New(
case kMathRound:
case kMathFloor:
return H_CONSTANT_DOUBLE(d);
+ case kMathClz32:
+ return H_CONSTANT_INT(32);
default:
UNREACHABLE();
break;
@@ -3938,6 +4126,11 @@ HInstruction* HUnaryMathOperation::New(
return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
case kMathFloor:
return H_CONSTANT_DOUBLE(std::floor(d));
+ case kMathClz32: {
+ uint32_t i = DoubleToUint32(d);
+ return H_CONSTANT_INT(
+ (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
+ }
default:
UNREACHABLE();
break;
@@ -4400,56 +4593,80 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}
-void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
+void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
// set the appropriate GVN flags for a given load or store instruction
- if (is_store) {
+ if (access_type == STORE) {
// track dominating allocations in order to eliminate write barriers
- instr->SetGVNFlag(kDependsOnNewSpacePromotion);
+ instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
- instr->SetGVNFlag(kDependsOnMaps);
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
}
switch (portion()) {
case kArrayLengths:
- instr->SetGVNFlag(is_store
- ? kChangesArrayLengths : kDependsOnArrayLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kArrayLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
+ }
break;
case kStringLengths:
- instr->SetGVNFlag(is_store
- ? kChangesStringLengths : kDependsOnStringLengths);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kStringLengths);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kStringLengths);
+ }
break;
case kInobject:
- instr->SetGVNFlag(is_store
- ? kChangesInobjectFields : kDependsOnInobjectFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kInobjectFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
+ }
break;
case kDouble:
- instr->SetGVNFlag(is_store
- ? kChangesDoubleFields : kDependsOnDoubleFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kDoubleFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
+ }
break;
case kBackingStore:
- instr->SetGVNFlag(is_store
- ? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
+ }
break;
case kElementsPointer:
- instr->SetGVNFlag(is_store
- ? kChangesElementsPointer : kDependsOnElementsPointer);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kElementsPointer);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
+ }
break;
case kMaps:
- instr->SetGVNFlag(is_store
- ? kChangesMaps : kDependsOnMaps);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kMaps);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kMaps);
+ }
break;
case kExternalMemory:
- instr->SetGVNFlag(is_store
- ? kChangesExternalMemory : kDependsOnExternalMemory);
+ if (access_type == STORE) {
+ instr->SetChangesFlag(::v8::internal::kExternalMemory);
+ } else {
+ instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
+ }
break;
}
}
-void HObjectAccess::PrintTo(StringStream* stream) {
+void HObjectAccess::PrintTo(StringStream* stream) const {
stream->Add(".");
switch (portion()) {
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index a62f3cebf..1e6ac19bf 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -102,12 +102,14 @@ class LChunkBuilder;
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(Div) \
+ V(DoubleBits) \
V(DummyUse) \
V(EnterInlined) \
V(EnvironmentMarker) \
@@ -224,6 +226,9 @@ class LChunkBuilder;
}
+enum PropertyAccessType { LOAD, STORE };
+
+
class Range V8_FINAL : public ZoneObject {
public:
Range()
@@ -473,22 +478,28 @@ class HUseIterator V8_FINAL BASE_EMBEDDED {
};
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags. All tracked flags should appear before untracked ones.
+// All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+#define DECLARE_FLAG(Type) k##Type,
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
- kNumberOfFlags,
-#define COUNT_FLAG(type) + 1
- kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#define COUNT_FLAG(Type) + 1
+ kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
+ kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
#undef COUNT_FLAG
+ kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
};
+static inline GVNFlag GVNFlagFromInt(int i) {
+ ASSERT(i >= 0);
+ ASSERT(i < kNumberOfFlags);
+ return static_cast<GVNFlag>(i);
+}
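The COUNT_FLAG idiom above expands every list entry to "+ 1" so the enum can
compute the tracked/untracked counts at compile time. A toy standalone version
of the same trick (the flag names below are invented for illustration):

    #define DEMO_TRACKED_LIST(V)   V(Maps) V(NewSpacePromotion)
    #define DEMO_UNTRACKED_LIST(V) V(Calls) V(OsrEntries) V(ExternalMemory)

    enum DemoFlag {
    #define DECLARE_FLAG(Type) kDemo##Type,
      DEMO_TRACKED_LIST(DECLARE_FLAG)
      DEMO_UNTRACKED_LIST(DECLARE_FLAG)
    #undef DECLARE_FLAG
    #define COUNT_FLAG(Type) + 1
      kDemoNumberOfTracked = 0 DEMO_TRACKED_LIST(COUNT_FLAG),      // == 2
      kDemoNumberOfUntracked = 0 DEMO_UNTRACKED_LIST(COUNT_FLAG),  // == 3
    #undef COUNT_FLAG
      kDemoNumberOfFlags = kDemoNumberOfTracked + kDemoNumberOfUntracked
    };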
+
+
class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -534,7 +545,62 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag, int64_t> GVNFlagSet;
+typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
+
+
+// This class encapsulates encoding and decoding of source positions from
+// which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set this object encodes the
+// identifier of the inlining and absolute offset from the start of the
+// inlined function.
+// When the flag is not set we simply track absolute offset from the
+// script start.
+class HSourcePosition {
+ public:
+ HSourcePosition(const HSourcePosition& other) : value_(other.value_) { }
+
+ static HSourcePosition Unknown() {
+ return HSourcePosition(RelocInfo::kNoPosition);
+ }
+
+ bool IsUnknown() const { return value_ == RelocInfo::kNoPosition; }
+
+ int position() const { return PositionField::decode(value_); }
+ void set_position(int position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ int inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(int inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<int>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ int raw() const { return value_; }
+
+ void PrintTo(FILE* f);
+
+ private:
+ typedef BitField<int, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<int, 9, 22> PositionField;
+
+  // Only HPositionInfo can use this constructor.
+ explicit HSourcePosition(int value) : value_(value) { }
+
+ friend class HPositionInfo;
+
+ // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
+ // and PositionField.
+ // Otherwise contains absolute offset from the script start.
+ int value_;
+};
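A standalone sketch of the packed encoding HSourcePosition uses when
FLAG_hydrogen_track_positions is on; the field widths (9-bit inlining id in
the low bits, 22-bit offset above it) follow the BitField declarations above,
everything else here is illustrative.

    #include <stdint.h>

    static int32_t SketchEncode(int inlining_id, int offset) {
      // inlining_id occupies bits [0, 9), offset occupies bits [9, 31).
      return static_cast<int32_t>((inlining_id & 0x1FF) |
                                  ((offset & 0x3FFFFF) << 9));
    }

    static int SketchInliningId(int32_t value) { return value & 0x1FF; }
    static int SketchPosition(int32_t value) { return (value >> 9) & 0x3FFFFF; }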
class HValue : public ZoneObject {
@@ -556,6 +622,9 @@ class HValue : public ZoneObject {
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
+ kLeftCanBeMinInt,
+ kLeftCanBeNegative,
+ kLeftCanBePositive,
kAllowUndefinedAsNaN,
kIsArguments,
kTruncatingToInt32,
@@ -585,18 +654,6 @@ class HValue : public ZoneObject {
STATIC_ASSERT(kLastFlag < kBitsPerInt);
- static const int kChangesToDependsFlagsLeftShift = 1;
-
- static GVNFlag ChangesFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2);
- }
- static GVNFlag DependsOnFlagFromInt(int x) {
- return static_cast<GVNFlag>(x * 2 + 1);
- }
- static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
- return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
- }
-
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@@ -630,8 +687,12 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
- virtual int position() const { return RelocInfo::kNoPosition; }
- virtual int operand_position(int index) const { return position(); }
+ virtual HSourcePosition position() const {
+ return HSourcePosition::Unknown();
+ }
+ virtual HSourcePosition operand_position(int index) const {
+ return position();
+ }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
@@ -681,21 +742,6 @@ class HValue : public ZoneObject {
return representation_.IsHeapObject() || type_.IsHeapObject();
}
- // An operation needs to override this function iff:
- // 1) it can produce an int32 output.
- // 2) the true value of its output can potentially be minus zero.
- // The implementation must set a flag so that it bails out in the case where
- // it would otherwise output what should be a minus zero as an int32 zero.
- // If the operation also exists in a form that takes int32 and outputs int32
- // then the operation should return its input value so that we can propagate
- // back. There are three operations that need to propagate back to more than
- // one input. They are phi and binary div and mul. They always return NULL
- // and expect the caller to take care of things.
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
- visited->Add(id());
- return NULL;
- }
-
// There are HInstructions that do not really change a value, they
// only add pieces of information to it (like bounds checks, map checks,
// smi checks...).
@@ -772,43 +818,38 @@ class HValue : public ZoneObject {
// of uses is non-empty.
bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
- GVNFlagSet gvn_flags() const { return gvn_flags_; }
- void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
- void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
- bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
- void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+ GVNFlagSet ChangesFlags() const { return changes_flags_; }
+ GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
+ void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
+ void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
+ void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
+ void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
+ bool CheckChangesFlag(GVNFlag f) const {
+ return changes_flags_.Contains(f);
+ }
+ bool CheckDependsOnFlag(GVNFlag f) const {
+ return depends_on_flags_.Contains(f);
+ }
+ void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
void ClearAllSideEffects() {
- gvn_flags_.Remove(AllSideEffectsFlagSet());
+ changes_flags_.Remove(AllSideEffectsFlagSet());
}
bool HasSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return !CheckFlag(kHasNoObservableSideEffects) &&
- gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
- }
-
- GVNFlagSet DependsOnFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllDependsOnFlagSet());
- return result;
+ changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet SideEffectFlags() const {
- GVNFlagSet result = gvn_flags_;
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllSideEffectsFlagSet());
return result;
}
- GVNFlagSet ChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
- return result;
- }
-
GVNFlagSet ObservableChangesFlags() const {
- GVNFlagSet result = gvn_flags_;
- result.Intersect(AllChangesFlagSet());
+ GVNFlagSet result = ChangesFlags();
result.Intersect(AllObservableSideEffectsFlagSet());
return result;
}
@@ -816,11 +857,6 @@ class HValue : public ZoneObject {
Range* range() const { return range_; }
// TODO(svenpanne) We should really use the null object pattern here.
bool HasRange() const { return range_ != NULL; }
- bool CanBeNegative() const { return !HasRange() || range()->CanBeNegative(); }
- bool CanBeZero() const { return !HasRange() || range()->CanBeZero(); }
- bool RangeCanInclude(int value) const {
- return !HasRange() || range()->Includes(value);
- }
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange(Zone* zone);
@@ -949,20 +985,9 @@ class HValue : public ZoneObject {
representation_ = r;
}
- static GVNFlagSet AllDependsOnFlagSet() {
+ static GVNFlagSet AllFlagSet() {
GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
- GVN_TRACKED_FLAG_LIST(ADD_FLAG)
- GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
- return result;
- }
-
- static GVNFlagSet AllChangesFlagSet() {
- GVNFlagSet result;
- // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
+#define ADD_FLAG(Type) result.Add(k##Type);
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
@@ -971,19 +996,19 @@ class HValue : public ZoneObject {
// A flag mask to mark an instruction as having arbitrary side effects.
static GVNFlagSet AllSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesOsrEntries);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kOsrEntries);
return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() {
- GVNFlagSet result = AllChangesFlagSet();
- result.Remove(kChangesNewSpacePromotion);
- result.Remove(kChangesElementsKind);
- result.Remove(kChangesElementsPointer);
- result.Remove(kChangesMaps);
+ GVNFlagSet result = AllFlagSet();
+ result.Remove(kNewSpacePromotion);
+ result.Remove(kElementsKind);
+ result.Remove(kElementsPointer);
+ result.Remove(kMaps);
return result;
}
@@ -1004,7 +1029,8 @@ class HValue : public ZoneObject {
HUseListNode* use_list_;
Range* range_;
int flags_;
- GVNFlagSet gvn_flags_;
+ GVNFlagSet changes_flags_;
+ GVNFlagSet depends_on_flags_;
private:
virtual bool IsDeletable() const { return false; }
@@ -1103,25 +1129,22 @@ class HValue : public ZoneObject {
// In the first case it contains the instruction's position as a tagged value.
// In the second case it points to an array which contains instruction's
// position and operands' positions.
-// TODO(vegorov): what we really want to track here is a combination of
-// source position and a script id because cross script inlining can easily
-// result in optimized functions composed of several scripts.
class HPositionInfo {
public:
explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
- int position() const {
+ HSourcePosition position() const {
if (has_operand_positions()) {
- return static_cast<int>(operand_positions()[kInstructionPosIndex]);
+ return operand_positions()[kInstructionPosIndex];
}
- return static_cast<int>(UntagPosition(data_));
+ return HSourcePosition(static_cast<int>(UntagPosition(data_)));
}
- void set_position(int pos) {
+ void set_position(HSourcePosition pos) {
if (has_operand_positions()) {
operand_positions()[kInstructionPosIndex] = pos;
} else {
- data_ = TagPosition(pos);
+ data_ = TagPosition(pos.raw());
}
}
@@ -1131,27 +1154,27 @@ class HPositionInfo {
}
const int length = kFirstOperandPosIndex + operand_count;
- intptr_t* positions =
- zone->NewArray<intptr_t>(length);
+ HSourcePosition* positions =
+ zone->NewArray<HSourcePosition>(length);
for (int i = 0; i < length; i++) {
- positions[i] = RelocInfo::kNoPosition;
+ positions[i] = HSourcePosition::Unknown();
}
- const int pos = position();
+ const HSourcePosition pos = position();
data_ = reinterpret_cast<intptr_t>(positions);
set_position(pos);
ASSERT(has_operand_positions());
}
- int operand_position(int idx) const {
+ HSourcePosition operand_position(int idx) const {
if (!has_operand_positions()) {
return position();
}
- return static_cast<int>(*operand_position_slot(idx));
+ return *operand_position_slot(idx);
}
- void set_operand_position(int idx, int pos) {
+ void set_operand_position(int idx, HSourcePosition pos) {
*operand_position_slot(idx) = pos;
}
@@ -1159,7 +1182,7 @@ class HPositionInfo {
static const intptr_t kInstructionPosIndex = 0;
static const intptr_t kFirstOperandPosIndex = 1;
- intptr_t* operand_position_slot(int idx) const {
+ HSourcePosition* operand_position_slot(int idx) const {
ASSERT(has_operand_positions());
return &(operand_positions()[kFirstOperandPosIndex + idx]);
}
@@ -1168,9 +1191,9 @@ class HPositionInfo {
return !IsTaggedPosition(data_);
}
- intptr_t* operand_positions() const {
+ HSourcePosition* operand_positions() const {
ASSERT(has_operand_positions());
- return reinterpret_cast<intptr_t*>(data_);
+ return reinterpret_cast<HSourcePosition*>(data_);
}
static const intptr_t kPositionTag = 1;
@@ -1218,23 +1241,23 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- virtual int position() const V8_OVERRIDE {
- return position_.position();
+ virtual HSourcePosition position() const V8_OVERRIDE {
+ return HSourcePosition(position_.position());
}
bool has_position() const {
- return position_.position() != RelocInfo::kNoPosition;
+ return !position().IsUnknown();
}
- void set_position(int position) {
+ void set_position(HSourcePosition position) {
ASSERT(!has_position());
- ASSERT(position != RelocInfo::kNoPosition);
+ ASSERT(!position.IsUnknown());
position_.set_position(position);
}
- virtual int operand_position(int index) const V8_OVERRIDE {
- const int pos = position_.operand_position(index);
- return (pos != RelocInfo::kNoPosition) ? pos : position();
+ virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+ const HSourcePosition pos = position_.operand_position(index);
+ return pos.IsUnknown() ? position() : pos;
}
- void set_operand_position(Zone* zone, int index, int pos) {
+ void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
ASSERT(0 <= index && index < OperandCount());
position_.ensure_storage_for_operand_positions(zone, OperandCount());
position_.set_operand_position(index, pos);
@@ -1248,6 +1271,8 @@ class HInstruction : public HValue {
virtual void Verify() V8_OVERRIDE;
#endif
+ bool CanDeoptimize();
+
virtual bool HasStackCheck() { return false; }
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
@@ -1258,7 +1283,7 @@ class HInstruction : public HValue {
next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetGVNFlag(kDependsOnOsrEntries);
+ SetDependsOnFlag(kOsrEntries);
}
virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
@@ -1679,9 +1704,6 @@ class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
@@ -1710,6 +1732,7 @@ class HChange V8_FINAL : public HUnaryOperation {
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
+ SetFlag(kCanOverflow);
if (is_truncating_to_smi) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
@@ -1719,7 +1742,7 @@ class HChange V8_FINAL : public HUnaryOperation {
set_type(HType::Smi());
} else {
set_type(HType::TaggedNumber());
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
@@ -1727,8 +1750,6 @@ class HChange V8_FINAL : public HUnaryOperation {
return CheckUsesForFlag(kAllowUndefinedAsNaN);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
virtual HType CalculateInferredType() V8_OVERRIDE;
virtual HValue* Canonicalize() V8_OVERRIDE;
@@ -1782,6 +1803,65 @@ class HClampToUint8 V8_FINAL : public HUnaryOperation {
};
+class HDoubleBits V8_FINAL : public HUnaryOperation {
+ public:
+ enum Bits { HIGH, LOW };
+ DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Double();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits)
+
+ Bits bits() { return bits_; }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
+ }
+
+ private:
+ HDoubleBits(HValue* value, Bits bits)
+ : HUnaryOperation(value), bits_(bits) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ Bits bits_;
+};
+
+
+class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Integer32();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
+
+ HValue* hi() { return OperandAt(0); }
+ HValue* lo() { return OperandAt(1); }
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HConstructDouble(HValue* hi, HValue* lo) {
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, hi);
+ SetOperandAt(1, lo);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+};
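A standalone sketch (not taken from the patch) of the operation the new
HDoubleBits / HConstructDouble instructions model: reading the high and low
32-bit words of a double, and reassembling a double from two such words.

    #include <stdint.h>
    #include <string.h>

    static uint32_t SketchDoubleHi(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    static uint32_t SketchDoubleLo(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits);
    }

    static double SketchConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      memcpy(&d, &bits, sizeof(d));
      return d;
    }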
+
+
enum RemovableSimulate {
REMOVABLE_SIMULATE,
FIXED_SIMULATE
@@ -1967,7 +2047,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
private:
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Type type_;
@@ -2515,7 +2595,7 @@ class HMapEnumLength V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value, HType::Smi()) {
set_representation(Representation::Smi());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
+ SetDependsOnFlag(kMaps);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2534,9 +2614,6 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
@@ -2551,6 +2628,8 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
return Representation::Double();
case kMathAbs:
return representation();
+ case kMathClz32:
+ return Representation::Integer32();
default:
UNREACHABLE();
return Representation::None();
@@ -2582,6 +2661,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
+ case kMathClz32:
set_representation(Representation::Integer32());
break;
case kMathAbs:
@@ -2589,7 +2669,7 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
SetFlag(kFlexibleRepresentation);
// TODO(svenpanne) This flag is actually only needed if representation()
// is tagged, and not when it is an unboxed double or unboxed integer.
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
break;
case kMathLog:
case kMathExp:
@@ -2638,7 +2718,7 @@ class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
SetFlag(kUseGVN);
// TODO(bmeurer): We'll need kDependsOnRoots once we add the
// corresponding HStoreRoot instruction.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -2651,10 +2731,10 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
- HValue *typecheck = NULL);
+ HValue* typecheck = NULL);
static HCheckMaps* New(Zone* zone, HValue* context,
HValue* value, SmallMapList* maps,
- HValue *typecheck = NULL) {
+ HValue* typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
check_map->Add(maps->at(i), zone);
@@ -2673,10 +2753,18 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
+ HValue* typecheck() { return OperandAt(1); }
Unique<Map> first_map() const { return map_set_.at(0); }
UniqueSet<Map> map_set() const { return map_set_; }
+ void set_map_set(UniqueSet<Map>* maps, Zone *zone) {
+ map_set_.Clear();
+ for (int i = 0; i < maps->size(); i++) {
+ map_set_.Add(maps->at(i), zone);
+ }
+ }
+
bool has_migration_target() const {
return has_migration_target_;
}
@@ -2693,9 +2781,12 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
private:
void Add(Handle<Map> map, Zone* zone) {
map_set_.Add(Unique<Map>(map), zone);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
+
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
}
@@ -2709,8 +2800,6 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
}
bool omit_;
@@ -3149,7 +3238,7 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
- virtual int position() const V8_OVERRIDE;
+ virtual HSourcePosition position() const V8_OVERRIDE;
int merged_index() const { return merged_index_; }
@@ -3314,7 +3403,7 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
void ReuseSideEffectsFromStore(HInstruction* store) {
ASSERT(store->HasObservableSideEffects());
ASSERT(store->IsStoreNamedField());
- gvn_flags_.Add(store->gvn_flags());
+ changes_flags_.Add(store->ChangesFlags());
}
// Replay effects of this instruction on the given environment.
@@ -3365,8 +3454,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool is_not_in_new_space,
HInstruction* instruction) {
return instruction->Prepend(new(zone) HConstant(
- unique, Representation::Tagged(), HType::Tagged(), false,
- is_not_in_new_space, false, false));
+ unique, Representation::Tagged(), HType::Tagged(),
+ is_not_in_new_space, false, false, kUnknownInstanceType));
}
Handle<Object> handle(Isolate* isolate) {
@@ -3401,7 +3490,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool ImmortalImmovable() const;
bool IsCell() const {
- return is_cell_;
+ return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE;
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -3449,14 +3538,14 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
ASSERT(!object_.handle().is_null());
- return type_.IsString();
+ return instance_type_ < FIRST_NONSTRING_TYPE;
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
- return HasStringValue() && is_internalized_string_;
+ return HasStringValue() && StringShape(instance_type_).IsInternalized();
}
bool HasExternalReferenceValue() const {
@@ -3468,6 +3557,8 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
+ bool IsUndetectable() const { return is_undetectable_; }
+ InstanceType GetInstanceType() const { return instance_type_; }
virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
@@ -3493,6 +3584,10 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return object_;
}
+ bool EqualsUnique(Unique<Object> other) const {
+ return object_.IsInitialized() && object_ == other;
+ }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
@@ -3540,10 +3635,10 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
HConstant(Unique<Object> unique,
Representation r,
HType type,
- bool is_internalized_string,
bool is_not_in_new_space,
- bool is_cell,
- bool boolean_value);
+ bool boolean_value,
+ bool is_undetectable,
+ InstanceType instance_type);
explicit HConstant(ExternalReference reference);
@@ -3566,13 +3661,15 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool has_external_reference_value_ : 1;
- bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
bool is_not_in_new_space_ : 1;
- bool is_cell_ : 1;
bool boolean_value_ : 1;
+ bool is_undetectable_: 1;
int32_t int32_value_;
double double_value_;
ExternalReference external_reference_value_;
+
+ static const InstanceType kUnknownInstanceType = FILLER_TYPE;
+ InstanceType instance_type_;
};
@@ -3654,11 +3751,19 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 1, left_pos);
set_operand_position(zone, 2, right_pos);
}
+ bool RightIsPowerOf2() {
+ if (!right()->IsInteger32Constant()) return false;
+ int32_t value = right()->GetInteger32Constant();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
@@ -3947,7 +4052,6 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -3956,6 +4060,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -3990,9 +4095,6 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
HValue*,
HValue*);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
@@ -4004,12 +4106,15 @@ class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
- if (!right->IsConstant()) {
- SetFlag(kCanBeDivByZero);
- }
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kLeftCanBeMinInt);
+ SetFlag(kLeftCanBeNegative);
+ SetFlag(kLeftCanBePositive);
SetFlag(kAllowUndefinedAsNaN);
}
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -4024,7 +4129,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
@@ -4033,12 +4137,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
- }
-
- bool RightIsPowerOf2() {
- if (!right()->IsInteger32Constant()) return false;
- int32_t value = right()->GetInteger32Constant();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
@@ -4109,7 +4208,9 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ void SetOperandPositions(Zone* zone,
+ HSourcePosition left_pos,
+ HSourcePosition right_pos) {
set_operand_position(zone, 0, left_pos);
set_operand_position(zone, 1, right_pos);
}
@@ -4192,6 +4293,12 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ static const int kNoKnownSuccessorIndex = -1;
+ int known_successor_index() const { return known_successor_index_; }
+ void set_known_successor_index(int known_successor_index) {
+ known_successor_index_ = known_successor_index;
+ }
+
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4211,7 +4318,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
HCompareObjectEqAndBranch(HValue* left,
HValue* right,
HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL) {
+ HBasicBlock* false_target = NULL)
+ : known_successor_index_(kNoKnownSuccessorIndex) {
ASSERT(!left->IsConstant() ||
(!HConstant::cast(left)->HasInteger32Value() ||
HConstant::cast(left)->HasSmiValue()));
@@ -4223,6 +4331,8 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
SetSuccessorAt(0, true_target);
SetSuccessorAt(1, false_target);
}
+
+ int known_successor_index_;
};
@@ -4236,6 +4346,8 @@ class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
private:
@@ -4256,6 +4368,8 @@ class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
protected:
@@ -4289,7 +4403,9 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
HIsSmiAndBranch(HValue* value,
HBasicBlock* true_target = NULL,
HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ set_representation(Representation::Tagged());
+ }
};
@@ -4303,6 +4419,8 @@ class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
private:
@@ -4348,7 +4466,7 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
Token::Value token_;
@@ -4385,6 +4503,8 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
return Representation::Tagged();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
@@ -4466,8 +4586,7 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
- Handle<String> type_literal() { return type_literal_; }
- bool compares_number_type() { return compares_number_type_; }
+ Handle<String> type_literal() { return type_literal_.handle(); }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
@@ -4478,16 +4597,16 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ type_literal_ = Unique<String>(type_literal_.handle());
+ }
+
private:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) {
- Heap* heap = type_literal->GetHeap();
- compares_number_type_ = type_literal->Equals(heap->number_string());
- }
+ type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
- Handle<String> type_literal_;
- bool compares_number_type_ : 1;
+ Unique<String> type_literal_;
};
@@ -4573,7 +4692,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -4596,9 +4715,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
return !representation().IsTagged() && !representation().IsExternal();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4614,10 +4730,6 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) {
- SetGVNFlag(kChangesNewSpacePromotion);
- ClearFlag(kAllowUndefinedAsNaN);
- }
if (to.IsTagged() &&
(left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4627,6 +4739,10 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
+ if (to.IsTagged()) {
+ SetChangesFlag(kNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
}
virtual Representation RepresentationFromInputs() V8_OVERRIDE;
@@ -4655,9 +4771,6 @@ class HSub V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
@@ -4704,9 +4817,6 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
// Only commutative if it is certain that no two objects are multiplied.
@@ -4744,9 +4854,6 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -4769,6 +4876,7 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
+ SetFlag(kLeftCanBeNegative);
}
};
@@ -4780,9 +4888,6 @@ class HDiv V8_FINAL : public HArithmeticBinaryOperation {
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(
- BitVector* visited) V8_OVERRIDE;
-
virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
@@ -5072,8 +5177,8 @@ class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
private:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
- SetGVNFlag(kChangesOsrEntries);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kOsrEntries);
+ SetChangesFlag(kNewSpacePromotion);
}
BailoutId ast_id_;
@@ -5215,7 +5320,7 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
: cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
+ SetDependsOnFlag(kGlobalVars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
@@ -5367,8 +5472,8 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, size);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kChangesNewSpacePromotion);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HAllocate with AllocationSite %p %s\n",
@@ -5566,7 +5671,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
: HUnaryOperation(value),
cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
- SetGVNFlag(kChangesGlobalVars);
+ SetChangesFlag(kGlobalVars);
}
Unique<PropertyCell> cell_;
@@ -5594,10 +5699,10 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
ASSERT(var->IsContextSlot());
switch (var->mode()) {
case LET:
- case CONST_HARMONY:
+ case CONST:
mode_ = kCheckDeoptimize;
break;
- case CONST:
+ case CONST_LEGACY:
mode_ = kCheckReturnUndefined;
break;
default:
@@ -5605,7 +5710,7 @@ class HLoadContextSlot V8_FINAL : public HUnaryOperation {
}
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnContextSlots);
+ SetDependsOnFlag(kContextSlots);
}
int slot_index() const { return slot_index_; }
@@ -5689,7 +5794,7 @@ class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
: slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetGVNFlag(kChangesContextSlots);
+ SetChangesFlag(kContextSlots);
}
int slot_index_;
@@ -5773,9 +5878,8 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kArrayLengths,
JSArray::kLengthOffset,
- IsFastElementsKind(elements_kind) &&
- FLAG_track_fields
- ? Representation::Smi() : Representation::Tagged());
+ IsFastElementsKind(elements_kind)
+ ? Representation::Smi() : Representation::Tagged());
}
static HObjectAccess ForAllocationSiteOffset(int offset);
@@ -5789,7 +5893,7 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kArrayLengths,
FixedArray::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForStringHashField() {
@@ -5803,7 +5907,7 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(
kStringLengths,
String::kLengthOffset,
- FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
+ Representation::Smi());
}
static HObjectAccess ForConsStringFirst() {
@@ -5834,18 +5938,6 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
}
- static HObjectAccess ForFirstCodeSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
- }
-
- static HObjectAccess ForFirstContextSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
- }
-
- static HObjectAccess ForFirstOsrAstIdSlot() {
- return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
- }
-
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
@@ -5967,14 +6059,14 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
}
- void PrintTo(StringStream* stream);
+ void PrintTo(StringStream* stream) const;
inline bool Equals(HObjectAccess that) const {
return value_ == that.value_; // portion and offset must match
}
protected:
- void SetGVNFlags(HValue *instr, bool is_store);
+ void SetGVNFlags(HValue* instr, PropertyAccessType access_type);
private:
// internal use only; different parts of an object or array
@@ -5989,6 +6081,8 @@ class HObjectAccess V8_FINAL {
kExternalMemory // some field in external memory
};
+ HObjectAccess() : value_(0) {}
+
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
Handle<String> name = Handle<String>::null(),
@@ -6021,6 +6115,7 @@ class HObjectAccess V8_FINAL {
friend class HLoadNamedField;
friend class HStoreNamedField;
+ friend class SideEffectsTracker;
inline Portion portion() const {
return PortionField::decode(value_);
@@ -6091,14 +6186,13 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
representation.IsExternal() ||
representation.IsInteger32()) {
set_representation(representation);
- } else if (FLAG_track_heap_object_fields &&
- representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
set_type(HType::NonPrimitive());
set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
}
- access.SetGVNFlags(this, false);
+ access.SetGVNFlags(this, LOAD);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -6157,7 +6251,7 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
};
@@ -6302,10 +6396,10 @@ class HLoadKeyed V8_FINAL
set_representation(Representation::Tagged());
}
- SetGVNFlag(kDependsOnArrayElements);
+ SetDependsOnFlag(kArrayElements);
} else {
set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
+ SetDependsOnFlag(kDoubleArrayElements);
}
} else {
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
@@ -6318,14 +6412,14 @@ class HLoadKeyed V8_FINAL
}
if (is_external()) {
- SetGVNFlag(kDependsOnExternalMemory);
+ SetDependsOnFlag(kExternalMemory);
} else if (is_fixed_typed_array()) {
- SetGVNFlag(kDependsOnTypedArrayElements);
+ SetDependsOnFlag(kTypedArrayElements);
} else {
UNREACHABLE();
}
// Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
+ SetDependsOnFlag(kCalls);
}
SetFlag(kUseGVN);
@@ -6449,7 +6543,8 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
+ if (!FLAG_use_write_barrier_elimination) return false;
new_space_dominator_ = dominator;
return false;
}
@@ -6489,8 +6584,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
}
bool NeedsWriteBarrier() {
- ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
- !has_transition());
+ ASSERT(!field_representation().IsDouble() || !has_transition());
if (IsSkipWriteBarrier()) return false;
if (field_representation().IsDouble()) return false;
if (field_representation().IsSmi()) return false;
@@ -6525,7 +6619,6 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
write_barrier_mode_(UPDATE_WRITE_BARRIER),
has_transition_(false),
store_mode_(store_mode) {
- if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
// Stores to a non-existent in-object property are allowed only to the
// newly allocated objects (via HAllocate or HInnerAllocatedObject).
ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
@@ -6533,7 +6626,7 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetOperandAt(2, obj);
- access.SetGVNFlags(this, true);
+ access.SetGVNFlags(this, STORE);
}
HObjectAccess access_;
@@ -6548,12 +6641,12 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
- StrictModeFlag);
+ StrictMode);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
Handle<String> name() { return name_; }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -6568,9 +6661,9 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
HValue* object,
Handle<String> name,
HValue* value,
- StrictModeFlag strict_mode_flag)
+ StrictMode strict_mode)
: name_(name),
- strict_mode_flag_(strict_mode_flag) {
+ strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -6578,7 +6671,7 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
}
Handle<String> name_;
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
@@ -6681,7 +6774,7 @@ class HStoreKeyed V8_FINAL
virtual bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) V8_OVERRIDE {
- ASSERT(side_effect == kChangesNewSpacePromotion);
+ ASSERT(side_effect == kNewSpacePromotion);
new_space_dominator_ = dominator;
return false;
}
@@ -6714,7 +6807,6 @@ class HStoreKeyed V8_FINAL
is_uninitialized_(false),
store_mode_(store_mode),
new_space_dominator_(NULL) {
- if (!FLAG_smi_x64_store_opt) store_mode_ = INITIALIZING_STORE;
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -6724,20 +6816,20 @@ class HStoreKeyed V8_FINAL
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
+ SetDependsOnFlag(kNewSpacePromotion);
}
if (is_external()) {
- SetGVNFlag(kChangesExternalMemory);
+ SetChangesFlag(kExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
+ SetChangesFlag(kDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
} else if (is_fixed_typed_array()) {
- SetGVNFlag(kChangesTypedArrayElements);
+ SetChangesFlag(kTypedArrayElements);
SetFlag(kAllowUndefinedAsNaN);
} else {
- SetGVNFlag(kChangesArrayElements);
+ SetChangesFlag(kArrayElements);
}
// EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
@@ -6761,13 +6853,13 @@ class HStoreKeyed V8_FINAL
class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, StrictModeFlag);
+ HValue*, HValue*, StrictMode);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
HValue* context() { return OperandAt(3); }
- StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+ StrictMode strict_mode() { return strict_mode_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged] = tagged
@@ -6783,8 +6875,8 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
HValue* object,
HValue* key,
HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
+ StrictMode strict_mode)
+ : strict_mode_(strict_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -6792,7 +6884,7 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
SetAllSideEffects();
}
- StrictModeFlag strict_mode_flag_;
+ StrictMode strict_mode_;
};
@@ -6829,6 +6921,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
transitioned_map_ == instr->transitioned_map_;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
HTransitionElementsKind(HValue* context,
HValue* object,
@@ -6841,10 +6935,10 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(0, object);
SetOperandAt(1, context);
SetFlag(kUseGVN);
- SetGVNFlag(kChangesElementsKind);
+ SetChangesFlag(kElementsKind);
if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kElementsPointer);
+ SetChangesFlag(kNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@@ -6895,8 +6989,8 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
flags_(flags), pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetChangesFlag(kNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HStringAdd with AllocationSite %p %s\n",
allocation_site.is_null()
@@ -6947,9 +7041,9 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnStringChars);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kStringChars);
+ SetChangesFlag(kNewSpacePromotion);
}
// No side effects: runtime function assumes string + number inputs.
@@ -6983,7 +7077,7 @@ class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE {
@@ -7078,7 +7172,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure() const { return pretenure_; }
bool has_no_literals() const { return has_no_literals_; }
bool is_generator() const { return is_generator_; }
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
private:
HFunctionLiteral(HValue* context,
@@ -7089,10 +7183,10 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
pretenure_(pretenure),
has_no_literals_(shared->num_literals() == 0),
is_generator_(shared->is_generator()),
- language_mode_(shared->language_mode()) {
+ strict_mode_(shared->strict_mode()) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7101,7 +7195,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
bool pretenure_ : 1;
bool has_no_literals_ : 1;
bool is_generator_ : 1;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
@@ -7163,7 +7257,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
+ SetChangesFlag(kNewSpacePromotion);
// This instruction is not marked as kChangesMaps, but does
// change the map of the input operand. Use it only when creating
@@ -7171,7 +7265,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
+ ASSERT(function->function_id == Runtime::kHiddenCreateObjectLiteral);
#endif
}
@@ -7242,7 +7336,7 @@ class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnStringChars);
+ SetDependsOnFlag(kStringChars);
}
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
@@ -7281,7 +7375,7 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
SetOperandAt(2, index);
SetOperandAt(3, value);
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesStringChars);
+ SetChangesFlag(kStringChars);
}
String::Encoding encoding_;
@@ -7321,8 +7415,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
SetOperandAt(1, map);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kDependsOnElementsKind);
+ SetDependsOnFlag(kMaps);
+ SetDependsOnFlag(kElementsKind);
}
};
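[editor's note] The hunks above replace the combined SetGVNFlag(kChanges*/kDependsOn*) calls with separate SetChangesFlag and SetDependsOnFlag calls over a single flag enum. As a rough illustration of that split (not V8's real classes; SideEffectFlag, FlagSet and Instr below are invented names), keeping two bit sets per instruction lets a GVN-style pass ask whether one instruction's side effects invalidate another instruction's dependencies:

    // Minimal standalone sketch, assuming two bit sets per instruction over one
    // flag enum; this is illustrative only and not V8's actual implementation.
    #include <bitset>
    #include <cstdio>

    enum SideEffectFlag { kMaps, kElementsKind, kNewSpacePromotion, kNumFlags };

    using FlagSet = std::bitset<kNumFlags>;

    struct Instr {
      FlagSet changes;     // side effects this instruction produces
      FlagSet depends_on;  // side effects this instruction must observe

      void SetChangesFlag(SideEffectFlag f) { changes.set(f); }
      void SetDependsOnFlag(SideEffectFlag f) { depends_on.set(f); }
    };

    // A GVN-style check: may `later` be hoisted above `earlier`?  Only if none
    // of the effects `earlier` changes are effects `later` depends on.
    bool CanHoistAbove(const Instr& later, const Instr& earlier) {
      return (later.depends_on & earlier.changes).none();
    }

    int main() {
      Instr store, check_maps;
      store.SetChangesFlag(kMaps);         // e.g. a map-changing store
      check_maps.SetDependsOnFlag(kMaps);  // e.g. a map check
      check_maps.SetDependsOnFlag(kElementsKind);
      std::printf("hoistable: %d\n", CanHoistAbove(check_maps, store));  // 0
      return 0;
    }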
diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc
index 0c7de8516..f84eac046 100644
--- a/deps/v8/src/hydrogen-load-elimination.cc
+++ b/deps/v8/src/hydrogen-load-elimination.cc
@@ -100,26 +100,33 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
+ case HValue::kTransitionElementsKind: {
+ HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
+ HValue* object = t->object()->ActualValue();
+ KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ break;
+ }
default: {
- if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ if (instr->CheckChangesFlag(kInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
- if (instr->CheckGVNFlag(kChangesMaps)) {
+ if (instr->CheckChangesFlag(kMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ if (instr->CheckChangesFlag(kElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ if (instr->CheckChangesFlag(kElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
- if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ if (instr->CheckChangesFlag(kOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -134,8 +141,32 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}
- // Support for global analysis with HFlowEngine: Copy state to successor
- // block.
+ // Support for global analysis with HFlowEngine: Merge given state with
+ // the other incoming state.
+ static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
+ HBasicBlock* succ_block,
+ HLoadEliminationTable* pred_state,
+ HBasicBlock* pred_block,
+ Zone* zone) {
+ ASSERT(pred_state != NULL);
+ if (succ_state == NULL) {
+ return pred_state->Copy(succ_block, pred_block, zone);
+ } else {
+ return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+ }
+ }
+
+ // Support for global analysis with HFlowEngine: Given state merged with all
+ // the other incoming states, prepare it for use.
+ static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
+ HBasicBlock* block,
+ Zone* zone) {
+ ASSERT(state != NULL);
+ return state;
+ }
+
+ private:
+ // Copy state to successor block.
HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
Zone* zone) {
HLoadEliminationTable* copy =
@@ -151,8 +182,7 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}
- // Support for global analysis with HFlowEngine: Merge this state with
- // the other incoming state.
+ // Merge this state with the other incoming state.
HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
@@ -432,11 +462,7 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
- : zone_(zone),
- maps_stored_(false),
- fields_stored_(false),
- elements_stored_(false),
- stores_(5, zone) { }
+ : zone_(zone), stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
@@ -444,37 +470,25 @@ class HLoadEliminationEffects : public ZoneObject {
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
- switch (instr->opcode()) {
- case HValue::kStoreNamedField: {
- stores_.Add(HStoreNamedField::cast(instr), zone_);
- break;
- }
- case HValue::kOsrEntry: {
- // Kill everything. Loads must not be hoisted past the OSR entry.
- maps_stored_ = true;
- fields_stored_ = true;
- elements_stored_ = true;
- }
- default: {
- fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
- maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
- maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
- elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
- }
+ if (instr->IsStoreNamedField()) {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ } else {
+ flags_.Add(instr->ChangesFlags());
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
- if (fields_stored_) {
+ // Loads must not be hoisted past the OSR entry; therefore we kill
+ // everything if we see an OSR entry.
+ if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
table->Kill();
return;
}
- if (maps_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillOffset(JSObject::kMapOffset);
}
- if (elements_stored_) {
+ if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
table->KillOffset(JSObject::kElementsOffset);
}
@@ -486,9 +500,7 @@ class HLoadEliminationEffects : public ZoneObject {
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
- maps_stored_ |= that->maps_stored_;
- fields_stored_ |= that->fields_stored_;
- elements_stored_ |= that->elements_stored_;
+ flags_.Add(that->flags_);
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
@@ -496,9 +508,7 @@ class HLoadEliminationEffects : public ZoneObject {
private:
Zone* zone_;
- bool maps_stored_ : 1;
- bool fields_stored_ : 1;
- bool elements_stored_ : 1;
+ GVNFlagSet flags_;
ZoneList<HStoreNamedField*> stores_;
};
diff --git a/deps/v8/src/hydrogen-minus-zero.cc b/deps/v8/src/hydrogen-minus-zero.cc
deleted file mode 100644
index 316e0f507..000000000
--- a/deps/v8/src/hydrogen-minus-zero.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-minus-zero.h"
-
-namespace v8 {
-namespace internal {
-
-void HComputeMinusZeroChecksPhase::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsChange()) {
- HChange* change = HChange::cast(current);
- // Propagate flags for negative zero checks upwards from conversions
- // int32-to-tagged and int32-to-double.
- Representation from = change->value()->representation();
- ASSERT(from.Equals(change->from()));
- if (from.IsSmiOrInteger32()) {
- ASSERT(change->to().IsTagged() ||
- change->to().IsDouble() ||
- change->to().IsSmiOrInteger32());
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(change->value());
- visited_.Clear();
- }
- } else if (current->IsCompareMinusZeroAndBranch()) {
- HCompareMinusZeroAndBranch* check =
- HCompareMinusZeroAndBranch::cast(current);
- if (check->value()->representation().IsSmiOrInteger32()) {
- ASSERT(visited_.IsEmpty());
- PropagateMinusZeroChecks(check->value());
- visited_.Clear();
- }
- }
- }
- }
-}
-
-
-void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
- for (HValue* current = value;
- current != NULL && !visited_.Contains(current->id());
- current = current->EnsureAndPropagateNotMinusZero(&visited_)) {
- // For phis, we must propagate the check to all of its inputs.
- if (current->IsPhi()) {
- visited_.Add(current->id());
- HPhi* phi = HPhi::cast(current);
- for (int i = 0; i < phi->OperandCount(); ++i) {
- PropagateMinusZeroChecks(phi->OperandAt(i));
- }
- break;
- }
-
- // For multiplication, division, and Math.min/max(), we must propagate
- // to the left and the right side.
- if (current->IsMul() || current->IsDiv() || current->IsMathMinMax()) {
- HBinaryOperation* operation = HBinaryOperation::cast(current);
- operation->EnsureAndPropagateNotMinusZero(&visited_);
- PropagateMinusZeroChecks(operation->left());
- PropagateMinusZeroChecks(operation->right());
- }
- }
-}
-
-} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-range-analysis.cc b/deps/v8/src/hydrogen-range-analysis.cc
index 76fd5f35f..9d58fc89f 100644
--- a/deps/v8/src/hydrogen-range-analysis.cc
+++ b/deps/v8/src/hydrogen-range-analysis.cc
@@ -78,7 +78,29 @@ void HRangeAnalysisPhase::Run() {
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- InferRange(it.Current());
+ HValue* value = it.Current();
+ InferRange(value);
+
+ // Compute the bailout-on-minus-zero flag.
+ if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ // Propagate flags for negative zero checks upwards from conversions
+ // int32-to-tagged and int32-to-double.
+ Representation from = instr->value()->representation();
+ ASSERT(from.Equals(instr->from()));
+ if (from.IsSmiOrInteger32()) {
+ ASSERT(instr->to().IsTagged() ||
+ instr->to().IsDouble() ||
+ instr->to().IsSmiOrInteger32());
+ PropagateMinusZeroChecks(instr->value());
+ }
+ } else if (value->IsCompareMinusZeroAndBranch()) {
+ HCompareMinusZeroAndBranch* instr =
+ HCompareMinusZeroAndBranch::cast(value);
+ if (instr->value()->representation().IsSmiOrInteger32()) {
+ PropagateMinusZeroChecks(instr->value());
+ }
+ }
}
// Continue analysis in all dominated blocks.
@@ -197,4 +219,79 @@ void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
}
+void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
+ ASSERT(worklist_.is_empty());
+ ASSERT(in_worklist_.IsEmpty());
+
+ AddToWorklist(value);
+ while (!worklist_.is_empty()) {
+ value = worklist_.RemoveLast();
+
+ if (value->IsPhi()) {
+ // For phis, we must propagate the check to all of its inputs.
+ HPhi* phi = HPhi::cast(value);
+ for (int i = 0; i < phi->OperandCount(); ++i) {
+ AddToWorklist(phi->OperandAt(i));
+ }
+ } else if (value->IsUnaryMathOperation()) {
+ HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
+ if (instr->representation().IsSmiOrInteger32() &&
+ !instr->value()->representation().Equals(instr->representation())) {
+ if (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ }
+ if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+ instr->representation().Equals(
+ instr->RequiredInputRepresentation(0))) {
+ AddToWorklist(instr->value());
+ }
+ } else if (value->IsChange()) {
+ HChange* instr = HChange::cast(value);
+ if (!instr->from().IsSmiOrInteger32() &&
+ !instr->CanTruncateToInt32() &&
+ (instr->value()->range() == NULL ||
+ instr->value()->range()->CanBeMinusZero())) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ } else if (value->IsForceRepresentation()) {
+ HForceRepresentation* instr = HForceRepresentation::cast(value);
+ AddToWorklist(instr->value());
+ } else if (value->IsMod()) {
+ HMod* instr = HMod::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsDiv() || value->IsMul()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ }
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ } else if (value->IsMathFloorOfDiv()) {
+ HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
+ instr->SetFlag(HValue::kBailoutOnMinusZero);
+ } else if (value->IsAdd() || value->IsSub()) {
+ HBinaryOperation* instr = HBinaryOperation::cast(value);
+ if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+ // Propagate to the left argument. If the left argument cannot be -0,
+ // then the result of the add/sub operation cannot be either.
+ AddToWorklist(instr->left());
+ }
+ } else if (value->IsMathMinMax()) {
+ HMathMinMax* instr = HMathMinMax::cast(value);
+ AddToWorklist(instr->right());
+ AddToWorklist(instr->left());
+ }
+ }
+
+ in_worklist_.Clear();
+ ASSERT(in_worklist_.IsEmpty());
+ ASSERT(worklist_.is_empty());
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-range-analysis.h b/deps/v8/src/hydrogen-range-analysis.h
index a1e9737c5..e0cc3c5da 100644
--- a/deps/v8/src/hydrogen-range-analysis.h
+++ b/deps/v8/src/hydrogen-range-analysis.h
@@ -37,7 +37,9 @@ namespace internal {
class HRangeAnalysisPhase : public HPhase {
public:
explicit HRangeAnalysisPhase(HGraph* graph)
- : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
+ : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
+ in_worklist_(graph->GetMaximumValueID(), zone()),
+ worklist_(32, zone()) {}
void Run();
@@ -49,8 +51,19 @@ class HRangeAnalysisPhase : public HPhase {
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
+ void AddToWorklist(HValue* value) {
+ if (in_worklist_.Contains(value->id())) return;
+ in_worklist_.Add(value->id());
+ worklist_.Add(value, zone());
+ }
+ void PropagateMinusZeroChecks(HValue* value);
ZoneList<HValue*> changed_ranges_;
+
+ BitVector in_worklist_;
+ ZoneList<HValue*> worklist_;
+
+ DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
};
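[editor's note] PropagateMinusZeroChecks above replaces the old recursive minus-zero phase with a worklist plus an in_worklist bit vector, so each value is visited at most once before the flag is set and its relevant operands are queued. A standalone sketch of just that traversal shape, with an invented Node type standing in for HValue and without the per-opcode rules:

    // Illustrative sketch of the worklist pattern only; names are invented and
    // the real pass applies opcode-specific rules when deciding what to queue.
    #include <cstdio>
    #include <vector>

    struct Node {
      int id;
      bool needs_minus_zero_check = false;
      std::vector<Node*> operands;  // inputs whose -0-ness flows into this node
    };

    void PropagateMinusZeroChecks(Node* start, int max_id) {
      std::vector<bool> in_worklist(max_id, false);
      std::vector<Node*> worklist;
      auto add = [&](Node* n) {
        if (in_worklist[n->id]) return;  // each value is processed at most once
        in_worklist[n->id] = true;
        worklist.push_back(n);
      };
      add(start);
      while (!worklist.empty()) {
        Node* n = worklist.back();
        worklist.pop_back();
        n->needs_minus_zero_check = true;
        for (Node* op : n->operands) add(op);  // e.g. phi inputs, mul operands
      }
    }

    int main() {
      Node a{0}, b{1}, mul{2};
      mul.operands = {&a, &b};
      PropagateMinusZeroChecks(&mul, 3);
      std::printf("%d %d %d\n", a.needs_minus_zero_check,
                  b.needs_minus_zero_check, mul.needs_minus_zero_check);
      return 0;
    }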
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index 07fc8be38..0b87d12eb 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -61,10 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
- if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
+ if (!use_value->operand_position(use_index).IsUnknown()) {
new_value->set_position(use_value->operand_position(use_index));
} else {
- ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !graph()->info()->IsOptimizing());
}
}
@@ -77,7 +78,10 @@ void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
HValue* value) {
Representation r = value->representation();
if (r.IsNone()) return;
- if (value->HasNoUses()) return;
+ if (value->HasNoUses()) {
+ if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
+ return;
+ }
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
diff --git a/deps/v8/src/hydrogen-store-elimination.cc b/deps/v8/src/hydrogen-store-elimination.cc
new file mode 100644
index 000000000..2e6ee5138
--- /dev/null
+++ b/deps/v8/src/hydrogen-store-elimination.cc
@@ -0,0 +1,139 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-store-elimination.h"
+#include "hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
+
+// Performs a block-by-block local analysis for removable stores.
+void HStoreEliminationPhase::Run() {
+ GVNFlagSet flags; // Use GVN flags as an approximation for some instructions.
+ flags.RemoveAll();
+
+ flags.Add(kArrayElements);
+ flags.Add(kArrayLengths);
+ flags.Add(kStringLengths);
+ flags.Add(kBackingStoreFields);
+ flags.Add(kDoubleArrayElements);
+ flags.Add(kDoubleFields);
+ flags.Add(kElementsPointer);
+ flags.Add(kInobjectFields);
+ flags.Add(kExternalMemory);
+ flags.Add(kStringChars);
+ flags.Add(kTypedArrayElements);
+
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ unobserved_.Rewind(0);
+ HBasicBlock* block = graph()->blocks()->at(i);
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+
+ // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField:
+ // Remove any unobserved stores overwritten by this store.
+ ProcessStore(HStoreNamedField::cast(instr));
+ break;
+ case HValue::kLoadNamedField:
+ // Observe any unobserved stores on this object + field.
+ ProcessLoad(HLoadNamedField::cast(instr));
+ break;
+ default:
+ ProcessInstr(instr, flags);
+ break;
+ }
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
+ HValue* object = store->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
+ store->access().Equals(prev->access())) {
+ // This store is guaranteed to overwrite the previous store.
+ prev->DeleteAndReplaceWith(NULL);
+ TRACE(("++ Unobserved store S%d overwritten by S%d\n",
+ prev->id(), store->id()));
+ unobserved_.Remove(i);
+ } else {
+ // TODO(titzer): remove map word clearing from folded allocations.
+ i++;
+ }
+ }
+ // Only non-transitioning stores are removable.
+ if (!store->has_transition()) {
+ TRACE(("-- Might remove store S%d\n", store->id()));
+ unobserved_.Add(store, zone());
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
+ HValue* object = load->object()->ActualValue();
+ int i = 0;
+ while (i < unobserved_.length()) {
+ HStoreNamedField* prev = unobserved_.at(i);
+ if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
+ load->access().Equals(prev->access())) {
+ TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
+ unobserved_.Remove(i);
+ } else {
+ i++;
+ }
+ }
+}
+
+
+void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
+ GVNFlagSet flags) {
+ if (unobserved_.length() == 0) return; // Nothing to do.
+ if (instr->CanDeoptimize()) {
+ TRACE(("-- Observed stores at I%d (might deoptimize)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->CheckChangesFlag(kNewSpacePromotion)) {
+ TRACE(("-- Observed stores at I%d (might GC)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+ if (instr->ChangesFlags().ContainsAnyOf(flags)) {
+ TRACE(("-- Observed stores at I%d (GVN flags)\n", instr->id()));
+ unobserved_.Rewind(0);
+ return;
+ }
+}
+
+} } // namespace v8::internal
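[editor's note] The new phase above walks each block once and remembers stores whose values have not yet been observed; a later store to the same field deletes the earlier one, while a load of that field, or any instruction that might deoptimize, GC, or touch the relevant memory, flushes the set. Below is a minimal sketch of that bookkeeping, under simplifying assumptions: plain ints stand in for objects and field offsets, and a single ProcessOther replaces the deopt/GC/GVN-flag checks. All names are invented, not V8's.

    // Block-local dead-store bookkeeping, sketched with exact identity in place
    // of the alias analysis used by the real pass.
    #include <cstdio>
    #include <vector>

    struct Store {
      int id;      // instruction id, for tracing
      int object;  // stand-in for the stored-to object
      int offset;  // stand-in for the accessed field
      bool dead = false;
    };

    class StoreEliminator {
     public:
      // A new store to the same (object, offset) kills earlier unobserved ones.
      void ProcessStore(Store* s) {
        for (size_t i = 0; i < unobserved_.size();) {
          Store* prev = unobserved_[i];
          if (prev->object == s->object && prev->offset == s->offset) {
            prev->dead = true;  // overwritten before anyone could read it
            unobserved_.erase(unobserved_.begin() + i);
          } else {
            ++i;
          }
        }
        unobserved_.push_back(s);
      }

      // A load of the same slot marks the pending store as observed.
      void ProcessLoad(int object, int offset) {
        for (size_t i = 0; i < unobserved_.size();) {
          Store* prev = unobserved_[i];
          if (prev->object == object && prev->offset == offset) {
            unobserved_.erase(unobserved_.begin() + i);
          } else {
            ++i;
          }
        }
      }

      // Anything that might deoptimize, GC, or read arbitrary memory
      // observes every pending store.
      void ProcessOther() { unobserved_.clear(); }

     private:
      std::vector<Store*> unobserved_;
    };

    int main() {
      Store a{1, 7, 16}, b{2, 7, 16};
      StoreEliminator elim;
      elim.ProcessStore(&a);
      elim.ProcessStore(&b);  // overwrites a before any load: a becomes dead
      std::printf("store 1 dead: %d, store 2 dead: %d\n", a.dead, b.dead);
      return 0;
    }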
diff --git a/deps/v8/src/hydrogen-minus-zero.h b/deps/v8/src/hydrogen-store-elimination.h
index d23ec1196..7dc871c9b 100644
--- a/deps/v8/src/hydrogen-minus-zero.h
+++ b/deps/v8/src/hydrogen-store-elimination.h
@@ -25,32 +25,33 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_HYDROGEN_MINUS_ZERO_H_
-#define V8_HYDROGEN_MINUS_ZERO_H_
+#ifndef V8_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_HYDROGEN_STORE_ELIMINATION_H_
#include "hydrogen.h"
+#include "hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
-
-class HComputeMinusZeroChecksPhase : public HPhase {
+class HStoreEliminationPhase : public HPhase {
public:
- explicit HComputeMinusZeroChecksPhase(HGraph* graph)
- : HPhase("H_Compute minus zero checks", graph),
- visited_(graph->GetMaximumValueID(), zone()) { }
+ explicit HStoreEliminationPhase(HGraph* graph)
+ : HPhase("H_Store elimination", graph),
+ unobserved_(10, zone()),
+ aliasing_() { }
void Run();
-
private:
- void PropagateMinusZeroChecks(HValue* value);
-
- BitVector visited_;
+ ZoneList<HStoreNamedField*> unobserved_;
+ HAliasAnalyzer* aliasing_;
- DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
+ void ProcessStore(HStoreNamedField* store);
+ void ProcessLoad(HLoadNamedField* load);
+ void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
};
} } // namespace v8::internal
-#endif // V8_HYDROGEN_MINUS_ZERO_H_
+#endif
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 16096ccf9..a7ef0cbd0 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -48,13 +48,13 @@
#include "hydrogen-gvn.h"
#include "hydrogen-mark-deoptimize.h"
#include "hydrogen-mark-unreachable.h"
-#include "hydrogen-minus-zero.h"
#include "hydrogen-osr.h"
#include "hydrogen-range-analysis.h"
#include "hydrogen-redundant-phi.h"
#include "hydrogen-removable-simulates.h"
#include "hydrogen-representation-changes.h"
#include "hydrogen-sce.h"
+#include "hydrogen-store-elimination.h"
#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
@@ -68,6 +68,8 @@
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -141,12 +143,13 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
+void HBasicBlock::AddInstruction(HInstruction* instr,
+ HSourcePosition position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -154,10 +157,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (position != RelocInfo::kNoPosition) {
+ if (!position.IsUnknown()) {
entry->set_position(position);
} else {
- ASSERT(!FLAG_emit_opt_code_positions ||
+ ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
}
first_ = last_ = entry;
@@ -210,7 +213,7 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end, int position) {
+void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
ASSERT(!IsFinished());
AddInstruction(end, position);
end_ = end;
@@ -221,7 +224,7 @@ void HBasicBlock::Finish(HControlInstruction* end, int position) {
void HBasicBlock::Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
@@ -244,7 +247,7 @@ void HBasicBlock::Goto(HBasicBlock* block,
void HBasicBlock::AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position) {
+ HSourcePosition position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
@@ -337,6 +340,15 @@ void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
}
+void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
+ ASSERT(IsFinished());
+ HBasicBlock* succ_block = end()->SuccessorAt(succ);
+
+ ASSERT(succ_block->predecessors()->length() == 1);
+ succ_block->MarkUnreachable();
+}
+
+
void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
if (HasPredecessor()) {
// Only loop header blocks can have a predecessor added after
@@ -696,10 +708,10 @@ HConstant* HGraph::GetConstant##Name() { \
Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Representation::Tagged(), \
htype, \
- false, \
true, \
+ boolean_value, \
false, \
- boolean_value); \
+ ODDBALL_TYPE); \
constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
@@ -1032,9 +1044,9 @@ void HGraphBuilder::IfBuilder::End() {
current = merge_at_join_blocks_;
while (current != NULL) {
if (current->deopt_ && current->block_ != NULL) {
- builder_->PadEnvironmentForContinuation(current->block_,
- merge_block);
- builder_->GotoNoSimulate(current->block_, merge_block);
+ current->block_->FinishExit(
+ HAbnormalExit::New(builder_->zone(), NULL),
+ HSourcePosition::Unknown());
}
current = current->next_;
}
@@ -1167,9 +1179,10 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- ASSERT(!FLAG_emit_opt_code_positions ||
- position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
- current_block()->AddInstruction(instr, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !position_.IsUnknown() ||
+ !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1178,9 +1191,10 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->Finish(last, position_);
+ ASSERT(!FLAG_hydrogen_track_positions ||
+ !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1188,9 +1202,9 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
- ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
- position_ != RelocInfo::kNoPosition);
- current_block()->FinishExit(instruction, position_);
+ ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+ !position_.IsUnknown());
+ current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
}
@@ -1214,7 +1228,7 @@ void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(!graph()->IsInsideNoSideEffectsScope());
- current_block()->AddNewSimulate(id, position_, removable);
+ current_block()->AddNewSimulate(id, source_position(), removable);
}
@@ -1240,38 +1254,9 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
}
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- const char* reason, HBasicBlock* continuation) {
- PadEnvironmentForContinuation(current_block(), continuation);
+void HGraphBuilder::FinishExitWithHardDeoptimization(const char* reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
- if (graph()->IsInsideNoSideEffectsScope()) {
- GotoNoSimulate(continuation);
- } else {
- Goto(continuation);
- }
-}
-
-
-void HGraphBuilder::PadEnvironmentForContinuation(
- HBasicBlock* from,
- HBasicBlock* continuation) {
- if (continuation->last_environment() != NULL) {
- // When merging from a deopt block to a continuation, resolve differences in
- // environment by pushing constant 0 and popping extra values so that the
- // environments match during the join. Push 0 since it has the most specific
- // representation, and will not influence representation inference of the
- // phi.
- int continuation_env_length = continuation->last_environment()->length();
- while (continuation_env_length != from->last_environment()->length()) {
- if (continuation_env_length > from->last_environment()->length()) {
- from->last_environment()->Push(graph()->GetConstant0());
- } else {
- from->last_environment()->Pop();
- }
- }
- } else {
- ASSERT(continuation->predecessors()->length() == 0);
- }
+ FinishExitCurrentBlock(New<HAbnormalExit>());
}
@@ -1298,19 +1283,20 @@ HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
Handle<JSFunction> f = Handle<JSFunction>::cast(
HConstant::cast(function)->handle(isolate()));
SharedFunctionInfo* shared = f->shared();
- if (!shared->is_classic_mode() || shared->native()) return object;
+ if (shared->strict_mode() == STRICT || shared->native()) return object;
}
return Add<HWrapReceiver>(object, function);
}
-HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
- HValue* elements,
- ElementsKind kind,
- HValue* length,
- HValue* key,
- bool is_js_array,
- bool is_store) {
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(
+ HValue* object,
+ HValue* elements,
+ ElementsKind kind,
+ HValue* length,
+ HValue* key,
+ bool is_js_array,
+ PropertyAccessType access_type) {
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1353,7 +1339,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length);
}
- if (is_store && kind == FAST_SMI_ELEMENTS) {
+ if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
@@ -1464,7 +1450,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder key_compare(this);
key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
@@ -1490,7 +1476,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
HValue* details = Add<HLoadKeyed>(elements, details_index,
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
IfBuilder details_compare(this);
details_compare.If<HCompareNumericAndBranch>(details,
graph()->GetConstant0(),
@@ -1560,7 +1546,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
elements,
Add<HConstant>(NameDictionary::kCapacityIndex),
static_cast<HValue*>(NULL),
- FAST_SMI_ELEMENTS);
+ FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1689,7 +1675,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
}
if_objectissmi.Else();
{
- if (type->Is(Type::Smi())) {
+ if (type->Is(Type::SignedSmall())) {
if_objectissmi.Deopt("Expected smi");
} else {
// Check if the object is a heap number.
@@ -1768,7 +1754,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
Add<HPushArgument>(object);
Push(Add<HCallRuntime>(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache),
1));
}
if_found.End();
@@ -1806,19 +1792,10 @@ HAllocate* HGraphBuilder::BuildAllocate(
HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
HValue* right_length) {
- // Compute the combined string length. If the result is larger than the max
- // supported string length, we bailout to the runtime. This is done implicitly
- // when converting the result back to a smi in case the max string length
- // equals the max smi value. Otherwise, for platforms with 32-bit smis, we do
+ // Compute the combined string length and check against max string length.
HValue* length = AddUncasted<HAdd>(left_length, right_length);
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (String::kMaxLength != Smi::kMaxValue) {
- IfBuilder if_nooverflow(this);
- if_nooverflow.If<HCompareNumericAndBranch>(
- length, Add<HConstant>(String::kMaxLength), Token::LTE);
- if_nooverflow.Then();
- if_nooverflow.ElseDeopt("String length exceeds limit");
- }
+ HValue* max_length = Add<HConstant>(String::kMaxLength);
+ Add<HBoundsCheck>(length, max_length);
return length;
}
@@ -1927,6 +1904,19 @@ void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
}
+HValue* HGraphBuilder::BuildObjectSizeAlignment(
+ HValue* unaligned_size, int header_size) {
+ ASSERT((header_size & kObjectAlignmentMask) == 0);
+ HValue* size = AddUncasted<HAdd>(
+ unaligned_size, Add<HConstant>(static_cast<int32_t>(
+ header_size + kObjectAlignmentMask)));
+ size->ClearFlag(HValue::kCanOverflow);
+ return AddUncasted<HBitwise>(
+ Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+ ~kObjectAlignmentMask)));
+}
+
+
HValue* HGraphBuilder::BuildUncheckedStringAdd(
HValue* left,
HValue* right,
@@ -2027,13 +2017,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
// Calculate the number of bytes needed for the characters in the
// string while observing object alignment.
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
- HValue* size = Pop();
- size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
- SeqString::kHeaderSize + kObjectAlignmentMask)));
- size->ClearFlag(HValue::kCanOverflow);
- size = AddUncasted<HBitwise>(
- Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
- ~kObjectAlignmentMask)));
+ HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
// Allocate the string object. HAllocate does not care whether we pass
// STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
@@ -2092,9 +2076,10 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
// Fallback to the runtime to add the two strings.
Add<HPushArgument>(left);
Add<HPushArgument>(right);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAdd),
- 2));
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd),
+ 2));
}
if_sameencodingandsequential.End();
}
@@ -2159,7 +2144,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
@@ -2172,18 +2157,18 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
- (elements_kind == FAST_ELEMENTS && is_store)) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
HValue* elements = AddLoadElements(checked_object);
- if (is_store && (fast_elements || fast_smi_only_elements) &&
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
HInstruction* length = NULL;
if (is_js_array) {
@@ -2215,7 +2200,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, elements_kind, is_store);
+ backing_store, key, val, bounds_check, elements_kind, access_type);
negative_checker.ElseDeopt("Negative key encountered");
negative_checker.End();
length_checker.End();
@@ -2225,7 +2210,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
return AddElementAccess(
backing_store, checked_key, val,
- checked_object, elements_kind, is_store);
+ checked_object, elements_kind, access_type);
}
}
ASSERT(fast_smi_only_elements ||
@@ -2235,7 +2220,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// In case val is stored into a fast smi array, assure that the value is a smi
// before manipulating the backing store. Otherwise the actual store may
// deopt, leaving the backing store in an invalid state.
- if (is_store && IsFastSmiElementsKind(elements_kind) &&
+ if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
@@ -2244,12 +2229,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(checked_object, elements,
elements_kind, length, key,
- is_js_array, is_store);
+ is_js_array, access_type);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
- if (is_store && (fast_elements || fast_smi_only_elements)) {
+ if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(checked_object, elements,
@@ -2257,12 +2242,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(), top_info());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ check_cow_map->ClearDependsOnFlag(kElementsKind);
}
}
}
return AddElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode);
+ elements_kind, access_type, load_mode);
}
@@ -2333,7 +2318,7 @@ HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
PretenureFlag pretenure_flag = !FLAG_allocation_site_pretenuring ?
isolate()->heap()->GetPretenureMode() : NOT_TENURED;
- return Add<HAllocate>(total_size, HType::JSArray(), pretenure_flag,
+ return Add<HAllocate>(total_size, HType::Tagged(), pretenure_flag,
instance_type);
}
@@ -2404,9 +2389,9 @@ HInstruction* HGraphBuilder::AddElementAccess(
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode) {
- if (is_store) {
+ if (access_type == STORE) {
ASSERT(val != NULL);
if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS) {
@@ -2418,7 +2403,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
: INITIALIZING_STORE);
}
- ASSERT(!is_store);
+ ASSERT(access_type == LOAD);
ASSERT(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
@@ -2639,11 +2624,11 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
HValue* object_elements;
if (IsFastDoubleElementsKind(kind)) {
HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
} else {
HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
- object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
NOT_TENURED, FIXED_ARRAY_TYPE);
}
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
@@ -2834,7 +2819,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
// TODO(mvstanton): we should always have a constructor function if we
@@ -2859,7 +2845,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddLoadNamedField(constructor_function_, access);
+ return builder()->Add<HLoadNamedField>(
+ constructor_function_, static_cast<HValue*>(NULL), access);
}
@@ -2993,7 +2980,7 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
: HGraphBuilder(info),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN),
+ initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
@@ -3005,7 +2992,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_= &initial_function_state_;
InitializeAstVisitor(info->zone());
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -3074,7 +3061,8 @@ HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+void HBasicBlock::FinishExit(HControlInstruction* instruction,
+ HSourcePosition position) {
Finish(instruction, position);
ClearEnvironment();
}
@@ -3097,7 +3085,9 @@ HGraph::HGraph(CompilationInfo* info)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
+ disallow_adding_new_values_(false),
+ next_inline_id_(0),
+ inlined_functions_(5, info->zone()) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -3105,6 +3095,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(zone_, descriptor->environment_length());
} else {
+ TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3132,6 +3123,81 @@ void HGraph::FinalizeUniqueness() {
}
+int HGraph::TraceInlinedFunction(
+ Handle<SharedFunctionInfo> shared,
+ HSourcePosition position) {
+ if (!FLAG_hydrogen_track_positions) {
+ return 0;
+ }
+
+ int id = 0;
+ for (; id < inlined_functions_.length(); id++) {
+ if (inlined_functions_[id].shared().is_identical_to(shared)) {
+ break;
+ }
+ }
+
+ if (id == inlined_functions_.length()) {
+ inlined_functions_.Add(InlinedFunctionInfo(shared), zone());
+
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ if (!script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(),
+ "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id);
+
+ {
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ shared->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ shared->end_position() - shared->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
+ }
+ }
+
+ PrintF(tracing_scope.file(), "\n--- END ---\n");
+ }
+ }
+ }
+
+ int inline_id = next_inline_id_++;
+
+ if (inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
+ shared->DebugName()->ToCString().get(),
+ info()->optimization_id(),
+ id,
+ inline_id);
+ position.PrintTo(tracing_scope.file());
+ PrintF(tracing_scope.file(), "\n");
+ }
+
+ return inline_id;
+}
+
+
+int HGraph::SourcePositionToScriptPosition(HSourcePosition pos) {
+ if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
+ return pos.raw();
+ }
+
+ return inlined_functions_[pos.inlining_id()].start_position() +
+ pos.position();
+}
+
+
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -3510,7 +3576,8 @@ void HGraph::CollectPhis() {
// a (possibly inlined) function.
FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind)
+ InliningKind inlining_kind,
+ int inlining_id)
: owner_(owner),
compilation_info_(info),
call_context_(NULL),
@@ -3520,6 +3587,8 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
entry_(NULL),
arguments_object_(NULL),
arguments_elements_(NULL),
+ inlining_id_(inlining_id),
+ outer_source_position_(HSourcePosition::Unknown()),
outer_(owner->function_state()) {
if (outer_ != NULL) {
// State for an inline function.
@@ -3543,12 +3612,27 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
+
+ if (FLAG_hydrogen_track_positions) {
+ outer_source_position_ = owner->source_position();
+ owner->EnterInlinedSource(
+ info->shared_info()->start_position(),
+ inlining_id);
+ owner->SetSourcePosition(info->shared_info()->start_position());
+ }
}
FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
+
+ if (FLAG_hydrogen_track_positions) {
+ owner_->set_source_position(outer_source_position_);
+ owner_->EnterInlinedSource(
+ outer_->compilation_info()->shared_info()->start_position(),
+ outer_->inlining_id());
+ }
}
@@ -3807,7 +3891,6 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
}
-
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
@@ -3816,20 +3899,6 @@ void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
}
-void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- Push(Add<HPushArgument>(Pop()));
-}
-
-
-void HOptimizedGraphBuilder::VisitArgumentList(
- ZoneList<Expression*>* arguments) {
- for (int i = 0; i < arguments->length(); i++) {
- CHECK_ALIVE(VisitArgument(arguments->at(i)));
- }
-}
-
-
void HOptimizedGraphBuilder::VisitExpressions(
ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
@@ -3979,10 +4048,11 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
- if (FLAG_use_range) Run<HRangeAnalysisPhase>();
+ if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
+
+ Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
- Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
@@ -4304,7 +4374,12 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
TestContext* test = TestContext::cast(context);
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
- CHECK_ALIVE(VisitForEffect(stmt->expression()));
+ // Visit in value context and ignore the result. This is needed to keep
+ // environment in sync with full-codegen since some visitors (e.g.
+ // VisitCountOperation) use the operand stack differently depending on
+ // context.
+ CHECK_ALIVE(VisitForValue(stmt->expression()));
+ Pop();
Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
@@ -4361,8 +4436,10 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
Type* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
- combined_type, stmt->tag()->position(), clause->label()->position(),
- clause->id());
+ combined_type,
+ ScriptPositionToSourcePosition(stmt->tag()->position()),
+ ScriptPositionToSourcePosition(clause->label()->position()),
+ PUSH_BEFORE_SIMULATE, clause->id());
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
HBasicBlock* body_block = graph()->CreateBasicBlock();
@@ -4782,14 +4859,14 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
HOptimizedGraphBuilder::GlobalPropertyAccess
HOptimizedGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+ Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
if (var->is_this() || !current_info()->has_global_object()) {
return kUseGeneric;
}
Handle<GlobalObject> global(current_info()->global_object());
global->Lookup(*var->name(), lookup);
if (!lookup->IsNormal() ||
- (is_store && lookup->IsReadOnly()) ||
+ (access_type == STORE && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
return kUseGeneric;
}
@@ -4803,8 +4880,9 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
HValue* context = environment()->context();
int length = current_info()->scope()->ContextChainLength(var->scope());
while (length-- > 0) {
- context = AddLoadNamedField(
- context, HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ context = Add<HLoadNamedField>(
+ context, static_cast<HValue*>(NULL),
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
}
return context;
}
@@ -4835,8 +4913,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupResult lookup(isolate());
- GlobalPropertyAccess type =
- LookupGlobalProperty(variable, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
if (type == kUseCell &&
current_info()->global_object()->IsAccessCheckNeeded()) {
@@ -5038,7 +5115,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// TODO(mvstanton): Add a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -5071,7 +5148,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* store;
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
} else {
PropertyAccessInfo info(this, STORE, ToType(map), name);
if (info.CanAccessMonomorphic()) {
@@ -5081,8 +5159,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
} else {
- CHECK_ALIVE(
- store = BuildStoreNamedGeneric(literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(
+ STORE, literal, name, value));
}
}
AddInstruction(store);
@@ -5194,7 +5272,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// TODO(mvstanton): Consider a flag to turn off creation of any
// AllocationMementos for this call: we are in crankshaft and should have
// learned enough about transition behavior to stop emitting mementos.
- Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
+ Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -5258,6 +5336,24 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
}
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
+ PropertyAccessInfo* info,
+ HValue* checked_object) {
+ HObjectAccess access = info->access();
+ if (access.representation().IsDouble()) {
+ // Load the heap number.
+ checked_object = Add<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL),
+ access.WithRepresentation(Representation::Tagged()));
+ checked_object->set_type(HType::HeapNumber());
+ // Load the double value from it.
+ access = HObjectAccess::ForHeapNumberValue();
+ }
+ return New<HLoadNamedField>(
+ checked_object, static_cast<HValue*>(NULL), access);
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
PropertyAccessInfo* info,
HValue* checked_object,
@@ -5268,7 +5364,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
info->map(), info->lookup(), info->name());
HStoreNamedField *instr;
- if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
+ if (field_access.representation().IsDouble()) {
HObjectAccess heap_number_access =
field_access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
@@ -5308,30 +5404,12 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (transition_to_field) {
HConstant* transition_constant = Add<HConstant>(info->transition());
instr->SetTransition(transition_constant, top_info());
- instr->SetGVNFlag(kChangesMaps);
+ instr->SetChangesFlag(kMaps);
}
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
- HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized) {
- if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for property assignment",
- Deoptimizer::SOFT);
- }
-
- return New<HStoreNamedGeneric>(
- object,
- name,
- value,
- function_strict_mode_flag());
-}
-
-
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
if (!CanInlinePropertyAccess(type_)) return false;
@@ -5513,7 +5591,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
return type->Is(Type::NumberOrString()) &&
- target->shared()->is_classic_mode() &&
+ target->shared()->strict_mode() == SLOPPY &&
!target->shared()->native();
}
@@ -5546,7 +5624,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (info->lookup()->IsField()) {
if (info->IsLoad()) {
- return BuildLoadNamedField(checked_holder, info->access());
+ return BuildLoadNamedField(info, checked_holder);
} else {
return BuildStoreNamedField(info, checked_object, value);
}
@@ -5642,7 +5720,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
smi_check = New<HIsSmiAndBranch>(
object, empty_smi_block, not_smi_block);
FinishCurrentBlock(smi_check);
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(object);
@@ -5668,9 +5746,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- Goto(if_true, number_block);
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(ast_id);
}
set_current_block(if_true);
@@ -5704,32 +5781,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic load, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful load.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- const char* message = "";
- switch (access_type) {
- case LOAD:
- message = "Unknown map in polymorphic load";
- break;
- case STORE:
- message = "Unknown map in polymorphic store";
- break;
- }
- FinishExitWithHardDeoptimization(message, join);
+ FinishExitWithHardDeoptimization("Uknown map in polymorphic access");
} else {
- HValue* result = NULL;
- switch (access_type) {
- case LOAD:
- result = Add<HLoadNamedGeneric>(object, name);
- break;
- case STORE:
- AddInstruction(BuildStoreNamedGeneric(object, name, value));
- result = value;
- break;
- }
- if (!ast_context()->IsEffect()) Push(result);
+ HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
+ AddInstruction(instr);
+ if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
if (join != NULL) {
Goto(join);
@@ -5741,9 +5797,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
ASSERT(join != NULL);
- join->SetJoinId(ast_id);
- set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (join->HasPredecessor()) {
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ } else {
+ set_current_block(NULL);
+ }
}
@@ -5784,8 +5844,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr,
- true, // is_store
- &has_side_effects);
+ STORE, &has_side_effects);
Drop(3);
Push(value);
Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
@@ -5835,7 +5894,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* value,
BailoutId ast_id) {
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
if (type == kUseCell) {
Handle<GlobalObject> global(current_info()->global_object());
Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
@@ -5873,7 +5932,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(),
- value, function_strict_mode_flag());
+ value, function_strict_mode());
USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -5908,7 +5967,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL:
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedConstCompoundAssignment);
}
BindIfLive(var, Top());
@@ -5937,11 +5996,11 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
// This case is checked statically so no need to
// perform checks here
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
@@ -6006,6 +6065,10 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
+ return Bailout(kNonInitializerAssignmentToConst);
+ }
+ } else if (var->mode() == CONST_LEGACY) {
+ if (expr->op() != Token::INIT_CONST_LEGACY) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
@@ -6016,10 +6079,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
HValue* old_value = environment()->Lookup(var);
Add<HUseConst>(old_value);
}
- } else if (var->mode() == CONST_HARMONY) {
- if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout(kNonInitializerAssignmentToConst);
- }
}
if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
@@ -6075,20 +6134,20 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
mode = HStoreContextSlot::kCheckDeoptimize;
break;
case CONST:
- return ast_context()->ReturnValue(Pop());
- case CONST_HARMONY:
// This case is checked statically so no need to
// perform checks here
UNREACHABLE();
+ case CONST_LEGACY:
+ return ast_context()->ReturnValue(Pop());
default:
mode = HStoreContextSlot::kNoCheck;
}
} else if (expr->op() == Token::INIT_VAR ||
expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST_HARMONY) {
+ expr->op() == Token::INIT_CONST) {
mode = HStoreContextSlot::kNoCheck;
} else {
- ASSERT(expr->op() == Token::INIT_CONST);
+ ASSERT(expr->op() == Token::INIT_CONST_LEGACY);
mode = HStoreContextSlot::kCheckIgnoreAssignment;
}
@@ -6128,10 +6187,10 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Add<HPushArgument>(value);
Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kThrow), 1);
+ Runtime::FunctionForId(Runtime::kHiddenThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
@@ -6143,29 +6202,6 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access) {
- if (FLAG_track_double_fields && access.representation().IsDouble()) {
- // load the heap number
- HLoadNamedField* heap_number = Add<HLoadNamedField>(
- object, static_cast<HValue*>(NULL),
- access.WithRepresentation(Representation::Tagged()));
- heap_number->set_type(HType::HeapNumber());
- // load the double value from it
- return New<HLoadNamedField>(
- heap_number, static_cast<HValue*>(NULL),
- HObjectAccess::ForHeapNumberValue());
- }
- return New<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
-}
-
-
-HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
- HObjectAccess access) {
- return AddInstruction(BuildLoadNamedField(object, access));
-}
-
-
HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
if (string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
@@ -6173,9 +6209,10 @@ HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
return Add<HConstant>(c_string->StringValue()->map()->instance_type());
}
}
- return AddLoadNamedField(
- AddLoadNamedField(string, HObjectAccess::ForMap()),
- HObjectAccess::ForMapInstanceType());
+ return Add<HLoadNamedField>(
+ Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForMap()),
+ static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
}
@@ -6186,26 +6223,40 @@ HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
return Add<HConstant>(c_string->StringValue()->length());
}
}
- return AddLoadNamedField(string, HObjectAccess::ForStringLength());
+ return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+ HObjectAccess::ForStringLength());
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
+ PropertyAccessType access_type,
HValue* object,
Handle<String> name,
+ HValue* value,
bool is_uninitialized) {
if (is_uninitialized) {
- Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ Add<HDeoptimize>("Insufficient type feedback for generic named access",
Deoptimizer::SOFT);
}
- return New<HLoadNamedGeneric>(object, name);
+ if (access_type == LOAD) {
+ return New<HLoadNamedGeneric>(object, name);
+ } else {
+ return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
+ }
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
- return New<HLoadKeyedGeneric>(object, key);
+HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
+ PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ if (access_type == LOAD) {
+ return New<HLoadKeyedGeneric>(object, key);
+ } else {
+ return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
+ }
}
@@ -6231,15 +6282,15 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode) {
HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
dependency);
if (dependency) {
- checked_object->ClearGVNFlag(kDependsOnElementsKind);
+ checked_object->ClearDependsOnFlag(kElementsKind);
}
- if (is_store && map->prototype()->IsJSObject()) {
+ if (access_type == STORE && map->prototype()->IsJSObject()) {
// monomorphic stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that
// aren't compatible with monomorphic keyed stores.
@@ -6258,7 +6309,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store,
+ map->elements_kind(), access_type,
load_mode, store_mode);
}
@@ -6324,7 +6375,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
consolidated_elements_kind,
- false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -6334,13 +6385,13 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- if (!is_store) {
+ if (access_type == LOAD) {
HInstruction* consolidated_load =
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
@@ -6360,6 +6411,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
+ if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ HInstruction* result = BuildKeyedGeneric(access_type, object, key, val);
+ *has_side_effects = result->HasObservableSideEffects();
+ return AddInstruction(result);
+ }
}
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
@@ -6393,15 +6449,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HInstruction* instr = NULL;
if (untransitionable_map->has_slow_elements_kind() ||
!untransitionable_map->IsJSObjectMap()) {
- instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key));
+ instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
- object, key, val, transition, untransitionable_map, is_store,
+ object, key, val, transition, untransitionable_map, access_type,
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- return is_store ? NULL : instr;
+ return access_type == STORE ? NULL : instr;
}
HBasicBlock* join = graph()->CreateBasicBlock();
@@ -6419,25 +6474,24 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(this_map);
HInstruction* access = NULL;
if (IsDictionaryElementsKind(elements_kind)) {
- access = is_store
- ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
- : AddInstruction(BuildLoadKeyedGeneric(object, key));
+ access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
} else {
ASSERT(IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind));
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind));
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
// Happily, mapcompare is a checked object.
access = BuildUncheckedMonomorphicElementAccess(
mapcompare, key, val,
map->instance_type() == JS_ARRAY_TYPE,
- elements_kind, is_store,
+ elements_kind, access_type,
load_mode,
store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (!is_store) {
+ if (access_type == LOAD) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
@@ -6445,12 +6499,16 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(other_map);
}
+ // Ensure that we visited at least one map above that goes to join. This is
+ // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
+ // rather than joining the join block. If this becomes an issue, insert a
+ // generic access in the case length() == 0.
+ ASSERT(join->predecessors()->length() > 0);
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
- join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
set_current_block(join);
- return is_store ? NULL : Pop();
+ return access_type == STORE ? NULL : Pop();
}
@@ -6459,7 +6517,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
@@ -6468,7 +6526,8 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
bool force_generic = false;
- if (is_store && (monomorphic || (types != NULL && !types->is_empty()))) {
+ if (access_type == STORE &&
+ (monomorphic || (types != NULL && !types->is_empty()))) {
// Stores can't be mono/polymorphic if their prototype chain has dictionary
// elements. However a receiver map that has dictionary elements itself
// should be left to normal mono/poly behavior (the other maps may benefit
@@ -6486,52 +6545,36 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (monomorphic) {
Handle<Map> map = types->first();
if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
- instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
- : BuildLoadKeyedGeneric(obj, key);
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
- obj, key, val, NULL, map, is_store, expr->GetStoreMode());
+ obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
} else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, is_store,
+ obj, key, val, types, access_type,
expr->GetStoreMode(), has_side_effects);
} else {
- if (is_store) {
+ if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
- instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
- instr = BuildLoadKeyedGeneric(obj, key);
}
- AddInstruction(instr);
+ instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
-HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
- HValue* object,
- HValue* key,
- HValue* value) {
- return New<HStoreKeyedGeneric>(
- object,
- key,
- value,
- function_strict_mode_flag());
-}
-
-
void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6644,11 +6687,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedAccess(
&info, object, checked_object, value, ast_id, return_id);
}
- if (access == LOAD) {
- return BuildLoadNamedGeneric(object, name, is_uninitialized);
- } else {
- return BuildStoreNamedGeneric(object, name, value, is_uninitialized);
- }
+ return BuildNamedGeneric(access, object, name, value, is_uninitialized);
}
@@ -6692,9 +6731,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr,
- false, // is_store
- &has_side_effects);
+ obj, key, NULL, expr, LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6740,7 +6777,7 @@ HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
AddInstruction(constant_value);
HCheckMaps* check =
Add<HCheckMaps>(constant_value, handle(constant->map()), info);
- check->ClearGVNFlag(kDependsOnElementsKind);
+ check->ClearDependsOnFlag(kElementsKind);
return check;
}
@@ -6824,44 +6861,13 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
}
-class FunctionSorter {
- public:
- FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
- FunctionSorter(int index, int ticks, int ast_length, int src_length)
- : index_(index),
- ticks_(ticks),
- ast_length_(ast_length),
- src_length_(src_length) { }
-
- int index() const { return index_; }
- int ticks() const { return ticks_; }
- int ast_length() const { return ast_length_; }
- int src_length() const { return src_length_; }
-
- private:
- int index_;
- int ticks_;
- int ast_length_;
- int src_length_;
-};
-
-
-inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
- int diff = lhs.ticks() - rhs.ticks();
- if (diff != 0) return diff > 0;
- diff = lhs.ast_length() - rhs.ast_length();
- if (diff != 0) return diff < 0;
- return lhs.src_length() < rhs.src_length();
-}
-
-
void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
Call* expr,
HValue* receiver,
SmallMapList* types,
Handle<String> name) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
- FunctionSorter order[kMaxCallPolymorphism];
+ int order[kMaxCallPolymorphism];
bool handle_smi = false;
bool handled_string = false;
@@ -6883,23 +6889,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
handle_smi = true;
}
expr->set_target(target);
- order[ordered_functions++] =
- FunctionSorter(i,
- expr->target()->shared()->profiler_ticks(),
- InliningAstSize(expr->target()),
- expr->target()->shared()->SourceSize());
+ order[ordered_functions++] = i;
}
}
- std::sort(order, order + ordered_functions);
-
HBasicBlock* number_block = NULL;
HBasicBlock* join = NULL;
handled_string = false;
int count = 0;
for (int fn = 0; fn < ordered_functions; ++fn) {
- int i = order[fn].index();
+ int i = order[fn];
PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
@@ -6919,7 +6919,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
number_block = graph()->CreateBasicBlock();
FinishCurrentBlock(New<HIsSmiAndBranch>(
receiver, empty_smi_block, not_smi_block));
- Goto(empty_smi_block, number_block);
+ GotoNoSimulate(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
@@ -6942,9 +6942,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
FinishCurrentBlock(compare);
if (info.type()->Is(Type::Number())) {
- Goto(if_true, number_block);
+ GotoNoSimulate(if_true, number_block);
if_true = number_block;
- number_block->SetJoinId(expr->id());
}
set_current_block(if_true);
@@ -6992,16 +6991,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
- // Because the deopt may be the only path in the polymorphic call, make sure
- // that the environment stack matches the depth on deopt that it otherwise
- // would have had after a successful call.
- Drop(1); // Drop receiver.
- if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
- FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call");
} else {
Property* prop = expr->expression()->AsProperty();
- HInstruction* function = BuildLoadNamedGeneric(
- receiver, name, prop->IsUninitialized());
+ HInstruction* function = BuildNamedGeneric(
+ LOAD, receiver, name, NULL, prop->IsUninitialized());
AddInstruction(function);
Push(function);
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -7105,7 +7099,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind) {
+ InliningKind inlining_kind,
+ HSourcePosition position) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7237,11 +7232,13 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
ASSERT(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
+ int function_id = graph()->TraceInlinedFunction(target_shared, position);
+
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind);
+ this, &target_info, inlining_kind, function_id);
HConstant* undefined = graph()->GetConstantUndefined();
@@ -7388,7 +7385,8 @@ bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7399,7 +7397,8 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
implicit_return_value,
expr->id(),
expr->ReturnId(),
- CONSTRUCT_CALL_RETURN);
+ CONSTRUCT_CALL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7413,7 +7412,8 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
NULL,
ast_id,
return_id,
- GETTER_CALL_RETURN);
+ GETTER_CALL_RETURN,
+ source_position());
}
@@ -7427,7 +7427,8 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
1,
implicit_return_value,
id, assignment_id,
- SETTER_CALL_RETURN);
+ SETTER_CALL_RETURN,
+ source_position());
}
@@ -7439,7 +7440,8 @@ bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
NULL,
expr->id(),
expr->ReturnId(),
- NORMAL_RETURN);
+ NORMAL_RETURN,
+ ScriptPositionToSourcePosition(expr->position()));
}
@@ -7455,6 +7457,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
case kMathAbs:
case kMathSqrt:
case kMathLog:
+ case kMathClz32:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
@@ -7525,6 +7528,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kMathAbs:
case kMathSqrt:
case kMathLog:
+ case kMathClz32:
if (argument_count == 2) {
HValue* argument = Pop();
Drop(2); // Receiver and function.
@@ -7623,7 +7627,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
result = AddElementAccess(elements, reduced_length, NULL,
- bounds_check, elements_kind, false);
+ bounds_check, elements_kind, LOAD);
Factory* factory = isolate()->factory();
double nan_double = FixedDoubleArray::hole_nan_as_double();
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
@@ -7633,7 +7637,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
elements_kind = FAST_HOLEY_ELEMENTS;
}
AddElementAccess(
- elements, reduced_length, hole, bounds_check, elements_kind, true);
+ elements, reduced_length, hole, bounds_check, elements_kind, STORE);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
@@ -7778,6 +7782,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
}
bool drop_extra = false;
+ bool is_store = false;
switch (call_type) {
case kCallApiFunction:
case kCallApiMethod:
@@ -7804,6 +7809,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
break;
case kCallApiSetter:
{
+ is_store = true;
// Receiver and prototype chain cannot have changed.
ASSERT_EQ(1, argc);
ASSERT_EQ(NULL, receiver);
@@ -7849,7 +7855,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
CallInterfaceDescriptor* descriptor =
isolate()->call_descriptor(Isolate::ApiFunctionCall);
- CallApiFunctionStub stub(true, call_data_is_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_is_undefined, argc);
Handle<Code> code = stub.GetCode(isolate());
HConstant* code_value = Add<HConstant>(code);
@@ -7941,7 +7947,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
Handle<JSFunction> target) {
SharedFunctionInfo* shared = target->shared();
- if (shared->is_classic_mode() && !shared->native()) {
+ if (shared->strict_mode() == SLOPPY && !shared->native()) {
// Cannot embed a direct reference to the global proxy
    // as it is dropped on deserialization.
CHECK(!Serializer::enabled());
@@ -7987,6 +7993,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(PushLoad(prop, receiver, key));
HValue* function = Pop();
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+
// Push the function under the receiver.
environment()->SetExpressionStackAt(0, function);
@@ -8041,6 +8049,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return Bailout(kPossibleDirectCallToEval);
}
+ // The function is on the stack in the unoptimized code during
+ // evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
Variable* var = proxy->var();
@@ -8049,14 +8061,12 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// access check is not enabled we assume that the function will not change
// and generate optimized code for calling the function.
LookupResult lookup(isolate());
- GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+ GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
if (type == kUseCell &&
!current_info()->global_object()->IsAccessCheckNeeded()) {
Handle<GlobalObject> global(current_info()->global_object());
known_global_function = expr->ComputeGlobalTarget(global, &lookup);
}
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
if (known_global_function) {
Add<HCheckValue>(function, expr->target());
@@ -8083,18 +8093,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
PushArgumentsFromEnvironment(argument_count);
call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
- Push(Add<HPushArgument>(graph()->GetConstantUndefined()));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count);
}
} else if (expr->IsMonomorphic()) {
- // The function is on the stack in the unoptimized code during
- // evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
-
Add<HCheckValue>(function, expr->target());
Push(graph()->GetConstantUndefined());
@@ -8120,13 +8125,10 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
function, expr->target(), argument_count));
} else {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- HValue* receiver = graph()->GetConstantUndefined();
- Push(Add<HPushArgument>(receiver));
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
call = New<HCallFunction>(function, argument_count);
- Drop(argument_count);
}
}
@@ -8211,9 +8213,8 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
- bool inline_ok = false;
Handle<JSFunction> caller = current_info()->closure();
- Handle<JSFunction> target(isolate()->global_context()->array_function(),
+ Handle<JSFunction> target(isolate()->native_context()->array_function(),
isolate());
int argument_count = expr->arguments()->length();
// We should have the function plus array arguments on the environment stack.
@@ -8221,6 +8222,7 @@ bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
Handle<AllocationSite> site = expr->allocation_site();
ASSERT(!site.is_null());
+ bool inline_ok = false;
if (site->CanInlineCall()) {
// We also want to avoid inlining in certain 1 argument scenarios.
if (argument_count == 1) {
@@ -8259,7 +8261,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -8289,12 +8291,25 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
- PretenureFlag pretenure_flag =
- (FLAG_pretenuring_call_new && !FLAG_allocation_site_pretenuring) ?
- isolate()->heap()->GetPretenureMode() : NOT_TENURED;
+ HAllocationMode allocation_mode;
+ if (FLAG_pretenuring_call_new) {
+ if (FLAG_allocation_site_pretenuring) {
+ // Try to use pretenuring feedback.
+ Handle<AllocationSite> allocation_site = expr->allocation_site();
+ allocation_mode = HAllocationMode(allocation_site);
+ // Take a dependency on allocation site.
+ AllocationSite::AddDependentCompilationInfo(allocation_site,
+ AllocationSite::TENURING,
+ top_info());
+ } else {
+ allocation_mode = HAllocationMode(
+ isolate()->heap()->GetPretenureMode());
+ }
+ }
+
HAllocate* receiver =
- Add<HAllocate>(size_in_bytes, HType::JSObject(), pretenure_flag,
- JS_OBJECT_TYPE);
+ BuildAllocate(size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE,
+ allocation_mode);
receiver->set_known_initial_map(initial_map);
// Load the initial map from the constructor.
@@ -8360,7 +8375,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
Handle<JSFunction> array_function(
- isolate()->global_context()->array_function(), isolate());
+ isolate()->native_context()->array_function(), isolate());
bool use_call_new_array = expr->target().is_identical_to(array_function);
if (use_call_new_array && IsCallNewArrayInlineable(expr)) {
// Verify we are still calling the array function for our native context.
@@ -8394,7 +8409,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
const HOptimizedGraphBuilder::InlineFunctionGenerator
HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
@@ -8416,9 +8431,6 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
Add<HStoreNamedField>(
obj,
- HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
- Add<HStoreNamedField>(
- obj,
HObjectAccess::ForJSArrayBufferViewByteOffset(),
byte_offset);
Add<HStoreNamedField>(
@@ -8426,18 +8438,31 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
HObjectAccess::ForJSArrayBufferViewByteLength(),
byte_length);
- HObjectAccess weak_first_view_access =
- HObjectAccess::ForJSArrayBufferWeakFirstView();
- Add<HStoreNamedField>(obj,
- HObjectAccess::ForJSArrayBufferViewWeakNext(),
- Add<HLoadNamedField>(buffer, static_cast<HValue*>(NULL),
- weak_first_view_access));
- Add<HStoreNamedField>(
- buffer, weak_first_view_access, obj);
+ if (buffer != NULL) {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
+ HObjectAccess weak_first_view_access =
+ HObjectAccess::ForJSArrayBufferWeakFirstView();
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ Add<HLoadNamedField>(buffer,
+ static_cast<HValue*>(NULL),
+ weak_first_view_access));
+ Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
+ } else {
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(),
+ Add<HConstant>(static_cast<int32_t>(0)));
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ graph()->GetConstantUndefined());
+ }
}
-void HOptimizedGraphBuilder::VisitDataViewInitialize(
+void HOptimizedGraphBuilder::GenerateDataViewInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
@@ -8460,7 +8485,116 @@ void HOptimizedGraphBuilder::VisitDataViewInitialize(
}
-void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
+static Handle<Map> TypedArrayMap(Isolate* isolate,
+ ExternalArrayType array_type,
+ ElementsKind target_kind) {
+ Handle<Context> native_context = isolate->native_context();
+ Handle<JSFunction> fun;
+ switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ fun = Handle<JSFunction>(native_context->type##_array_fun()); \
+ break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ Handle<Map> map(fun->initial_map());
+ return Map::AsElementsKind(map, target_kind);
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length) {
+ Handle<Map> external_array_map(
+ isolate()->heap()->MapForExternalArrayType(array_type));
+ HValue* elements =
+ Add<HAllocate>(
+ Add<HConstant>(ExternalArray::kAlignedSize),
+ HType::Tagged(),
+ NOT_TENURED,
+ external_array_map->instance_type());
+
+ AddStoreMapConstant(elements, external_array_map);
+
+ HValue* backing_store = Add<HLoadNamedField>(
+ buffer, static_cast<HValue*>(NULL),
+ HObjectAccess::ForJSArrayBufferBackingStore());
+
+ HValue* typed_array_start;
+ if (is_zero_byte_offset) {
+ typed_array_start = backing_store;
+ } else {
+ HInstruction* external_pointer =
+ AddUncasted<HAdd>(backing_store, byte_offset);
+    // Arguments are checked prior to the call to TypedArrayInitialize,
+ // including byte_offset.
+ external_pointer->ClearFlag(HValue::kCanOverflow);
+ typed_array_start = external_pointer;
+ }
+
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForExternalArrayExternalPointer(),
+ typed_array_start);
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(), length);
+ return elements;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length) {
+ STATIC_ASSERT(
+ (FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* total_size;
+
+  // If the fixed array's elements are not aligned to the object alignment,
+  // we need to align the whole array to the object alignment.
+ if (element_size % kObjectAlignment != 0) {
+ total_size = BuildObjectSizeAlignment(
+ byte_length, FixedTypedArrayBase::kHeaderSize);
+ } else {
+ total_size = AddUncasted<HAdd>(byte_length,
+ Add<HConstant>(FixedTypedArrayBase::kHeaderSize));
+ total_size->ClearFlag(HValue::kCanOverflow);
+ }
+
+ Handle<Map> fixed_typed_array_map(
+ isolate()->heap()->MapForFixedTypedArray(array_type));
+ HValue* elements =
+ Add<HAllocate>(total_size, HType::Tagged(),
+ NOT_TENURED,
+ fixed_typed_array_map->instance_type());
+ AddStoreMapConstant(elements, fixed_typed_array_map);
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(),
+ length);
+ HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+
+ {
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+ HValue* key = builder.BeginBody(
+ Add<HConstant>(static_cast<int32_t>(0)),
+ length, Token::LT);
+ Add<HStoreKeyed>(elements, key, filler, fixed_elements_kind);
+
+ builder.EndBody();
+ }
+ Add<HStoreNamedField>(
+ elements, HObjectAccess::ForFixedArrayLength(), length);
+ return elements;
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
CallRuntime* expr) {
ZoneList<Expression*>* arguments = expr->arguments();
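
BuildAllocateFixedTypedArray above has to round the backing store size up to the object alignment boundary whenever the element size does not already guarantee it. Below is a minimal standalone sketch of that rounding, assuming 8-byte object alignment; the constant and helper names are illustrative, not the actual V8 declarations.

    #include <cstddef>
    #include <cstdio>

    // Illustrative constants; the real values live in V8's globals.h.
    static const size_t kObjectAlignment = 8;
    static const size_t kObjectAlignmentMask = kObjectAlignment - 1;

    // Round header + payload up to the next object-aligned size, as the
    // BuildObjectSizeAlignment path does for on-heap typed arrays.
    static size_t AlignToObjectSize(size_t header_size, size_t byte_length) {
      return (header_size + byte_length + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }

    int main() {
      // E.g. a 13-byte payload behind a 16-byte header needs 29 bytes,
      // which rounds up to 32 so the next heap object stays aligned.
      std::printf("%zu\n", AlignToObjectSize(16, 13));  // prints 32
      return 0;
    }
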
@@ -8483,8 +8617,13 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
ASSERT(value->IsSmi());
int array_id = Smi::cast(*value)->value();
- CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
- HValue* buffer = Pop();
+ HValue* buffer;
+ if (!arguments->at(kBufferArg)->IsNullLiteral()) {
+ CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+ buffer = Pop();
+ } else {
+ buffer = NULL;
+ }
HValue* byte_offset;
bool is_zero_byte_offset;
@@ -8498,6 +8637,7 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
byte_offset = Pop();
is_zero_byte_offset = false;
+ ASSERT(buffer != NULL);
}
CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
@@ -8510,13 +8650,24 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
byte_offset_smi.Then();
}
+ ExternalArrayType array_type =
+ kExternalInt8Array; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ ElementsKind external_elements_kind = // Bogus initialization.
+ EXTERNAL_INT8_ELEMENTS;
+ ElementsKind fixed_elements_kind = // Bogus initialization.
+ INT8_ELEMENTS;
+ Runtime::ArrayIdToTypeAndSize(array_id,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
+
+
{ // byte_offset is Smi.
BuildArrayBufferViewInitialization<JSTypedArray>(
obj, buffer, byte_offset, byte_length);
- ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
- size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(array_id, &array_type, &element_size);
HInstruction* length = AddUncasted<HDiv>(byte_length,
Add<HConstant>(static_cast<int32_t>(element_size)));
@@ -8525,40 +8676,19 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
HObjectAccess::ForJSTypedArrayLength(),
length);
- Handle<Map> external_array_map(
- isolate()->heap()->MapForExternalArrayType(array_type));
-
- HValue* elements =
- Add<HAllocate>(
- Add<HConstant>(ExternalArray::kAlignedSize),
- HType::JSArray(),
- NOT_TENURED,
- external_array_map->instance_type());
-
- AddStoreMapConstant(elements, external_array_map);
-
- HValue* backing_store = Add<HLoadNamedField>(
- buffer, static_cast<HValue*>(NULL),
- HObjectAccess::ForJSArrayBufferBackingStore());
-
- HValue* typed_array_start;
- if (is_zero_byte_offset) {
- typed_array_start = backing_store;
+ HValue* elements;
+ if (buffer != NULL) {
+ elements = BuildAllocateExternalElements(
+ array_type, is_zero_byte_offset, buffer, byte_offset, length);
+ Handle<Map> obj_map = TypedArrayMap(
+ isolate(), array_type, external_elements_kind);
+ AddStoreMapConstant(obj, obj_map);
} else {
- HInstruction* external_pointer =
- AddUncasted<HAdd>(backing_store, byte_offset);
- // Arguments are checked prior to call to TypedArrayInitialize,
- // including byte_offset.
- external_pointer->ClearFlag(HValue::kCanOverflow);
- typed_array_start = external_pointer;
- }
-
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForExternalArrayExternalPointer(),
- typed_array_start);
- Add<HStoreNamedField>(elements,
- HObjectAccess::ForFixedArrayLength(),
- length);
+ ASSERT(is_zero_byte_offset);
+ elements = BuildAllocateFixedTypedArray(
+ array_type, element_size, fixed_elements_kind,
+ byte_length, length);
+ }
Add<HStoreNamedField>(
obj, HObjectAccess::ForElementsPointer(), elements);
}
@@ -8566,19 +8696,35 @@ void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
if (!is_zero_byte_offset) {
byte_offset_smi.Else();
{ // byte_offset is not Smi.
- Push(Add<HPushArgument>(obj));
- VisitArgument(arguments->at(kArrayIdArg));
- Push(Add<HPushArgument>(buffer));
- Push(Add<HPushArgument>(byte_offset));
- Push(Add<HPushArgument>(byte_length));
+ Push(obj);
+ CHECK_ALIVE(VisitForValue(arguments->at(kArrayIdArg)));
+ Push(buffer);
+ Push(byte_offset);
+ Push(byte_length);
+ PushArgumentsFromEnvironment(kArgsLength);
Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
- Drop(kArgsLength);
}
}
byte_offset_smi.End();
}
+void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+ return ast_context()->ReturnInstruction(max_smi, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
+ CallRuntime* expr) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* result = New<HConstant>(static_cast<int32_t>(
+ FLAG_typed_array_max_size_in_heap));
+ return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -8590,21 +8736,8 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
- if (function->function_id == Runtime::kDataViewInitialize) {
- return VisitDataViewInitialize(expr);
- }
-
- if (function->function_id == Runtime::kTypedArrayInitialize) {
- return VisitTypedArrayInitialize(expr);
- }
-
- if (function->function_id == Runtime::kMaxSmi) {
- ASSERT(expr->arguments()->length() == 0);
- HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
- return ast_context()->ReturnInstruction(max_smi, expr->id());
- }
-
- if (function->intrinsic_type == Runtime::INLINE) {
+ if (function->intrinsic_type == Runtime::INLINE ||
+ function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
// Call to an inline function.
@@ -8619,13 +8752,12 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
(this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
HCallRuntime* call = New<HCallRuntime>(name, function,
argument_count);
- Drop(argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -8656,7 +8788,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
Add<HPushArgument>(obj);
Add<HPushArgument>(key);
- Add<HPushArgument>(Add<HConstant>(function_strict_mode_flag()));
+ Add<HPushArgument>(Add<HConstant>(function_strict_mode()));
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
HInstruction* instr = New<HInvokeFunction>(function, 3);
@@ -8802,7 +8934,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -8820,7 +8952,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST) {
+ if (var->mode() == CONST_LEGACY) {
return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
@@ -8944,13 +9076,7 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
}
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
- if (sa != sub->right()) return false;
- HValue* const32 = sub->left();
- if (!const32->IsConstant() ||
- HConstant::cast(const32)->Integer32Value() != 32) {
- return false;
- }
- return (sub->right() == sa);
+ return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
}
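
The rewritten check in ShiftAmountsAllowReplaceByRotate recognizes a left shift by sa paired with a right shift by (32 - sa), which together form a rotate and can be emitted as a single instruction on targets with a rotate opcode. A small sketch of the pattern being matched, written as plain C++ on 32-bit unsigned values (the helper name is illustrative):

    #include <cstdint>
    #include <cstdio>

    // (x << sa) | (x >> (32 - sa)) is a rotate-left by sa.
    static uint32_t RotateLeft(uint32_t x, uint32_t sa) {
      sa &= 31;  // shift amounts are taken mod 32, as in JavaScript
      return (x << sa) | (x >> ((32 - sa) & 31));
    }

    int main() {
      std::printf("%08x\n", RotateLeft(0x80000001u, 4));  // prints 00000018
      return 0;
    }
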
@@ -8999,7 +9125,7 @@ bool CanBeZero(HValue* right) {
HValue* HGraphBuilder::EnforceNumberType(HValue* number,
Type* expected) {
- if (expected->Is(Type::Smi())) {
+ if (expected->Is(Type::SignedSmall())) {
return AddUncasted<HForceRepresentation>(number, Representation::Smi());
}
if (expected->Is(Type::Signed32())) {
@@ -9042,7 +9168,7 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
if (expected_obj->Is(Type::Undefined(zone()))) {
// This is already done by HChange.
- *expected = Type::Union(expected_number, Type::Double(zone()), zone());
+ *expected = Type::Union(expected_number, Type::Float(zone()), zone());
return value;
}
@@ -9078,13 +9204,12 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
if (result->HasObservableSideEffects() || result->IsPhi()) {
- if (push_sim_result == NO_PUSH_BEFORE_SIMULATE) {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
- } else {
- ASSERT(push_sim_result == PUSH_BEFORE_SIMULATE);
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
Push(result);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
+ } else {
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
}
}
return result;
@@ -9238,21 +9363,15 @@ HValue* HGraphBuilder::BuildBinaryOperation(
instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
- if (fixed_right_arg.has_value) {
- if (right->IsConstant()) {
- HConstant* c_right = HConstant::cast(right);
- if (c_right->HasInteger32Value()) {
- ASSERT_EQ(fixed_right_arg.value, c_right->Integer32Value());
- }
- } else {
- HConstant* fixed_right = Add<HConstant>(
- static_cast<int>(fixed_right_arg.value));
- IfBuilder if_same(this);
- if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
- if_same.Then();
- if_same.ElseDeopt("Unexpected RHS of binary operation");
- right = fixed_right;
- }
+ if (fixed_right_arg.has_value &&
+ !right->EqualsInteger32Constant(fixed_right_arg.value)) {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
break;
@@ -9469,9 +9588,11 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
+ if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
- zone(), expr->left()->position(), expr->right()->position());
+ zone(),
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()));
}
return ast_context()->ReturnValue(result);
}
@@ -9505,7 +9626,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -9540,7 +9661,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
@@ -9600,9 +9721,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnInstruction(result, expr->id());
}
+ PushBeforeSimulateBehavior push_behavior =
+ ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+ : PUSH_BEFORE_SIMULATE;
HControlInstruction* compare = BuildCompareInstruction(
op, left, right, left_type, right_type, combined_type,
- expr->left()->position(), expr->right()->position(), expr->id());
+ ScriptPositionToSourcePosition(expr->left()->position()),
+ ScriptPositionToSourcePosition(expr->right()->position()),
+ push_behavior, expr->id());
if (compare == NULL) return; // Bailed out.
return ast_context()->ReturnControl(compare, expr->id());
}
@@ -9615,8 +9741,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Type* left_type,
Type* right_type,
Type* combined_type,
- int left_position,
- int right_position,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
BailoutId bailout_id) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
@@ -9641,7 +9768,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
@@ -9681,9 +9808,13 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
- Push(result);
- AddSimulate(bailout_id, REMOVABLE_SIMULATE);
- Drop(1);
+ if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+ Push(result);
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ Drop(1);
+ } else {
+ AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+ }
}
// TODO(jkummerow): Can we make this more efficient?
HBranch* branch = New<HBranch>(result);
@@ -9692,7 +9823,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (FLAG_emit_opt_code_positions) {
+ if (FLAG_hydrogen_track_positions) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
@@ -9708,7 +9839,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -9780,22 +9911,26 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
+ if (pretenure_flag == TENURED &&
+ elements->map() == isolate()->heap()->fixed_cow_array_map() &&
+ isolate()->heap()->InNewSpace(*elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that the
+ // array is already in old space, otherwise we'll create too many old-to-
+ // new-space pointers (overflowing the store buffer).
+ elements = Handle<FixedArrayBase>(
+ isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(elements)));
+ boilerplate_object->set_elements(*elements);
+ }
+
HInstruction* object_elements = NULL;
if (elements_size > 0) {
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
- // Allocation folding will not be able to fold |object| and
- // |object_elements| together if they are pre-tenured.
- if (pretenure_flag == TENURED) {
- HConstant* empty_fixed_array = Add<HConstant>(
- isolate()->factory()->empty_fixed_array());
- Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- }
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(),
pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current());
} else {
- object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ object_elements = Add<HAllocate>(object_elements_size, HType::Tagged(),
pretenure_flag, FIXED_ARRAY_TYPE, site_context->current());
}
}
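
The new guard above copies and tenures a copy-on-write elements array before a literal is pretenured, because a tenured literal that kept pointing into new space would need a store-buffer entry for the old-to-new pointer, and many such literals could overflow the buffer. A toy model of the same decision with stand-in types instead of the real heap classes follows; CopyAndTenure is a hypothetical stand-in for Factory::CopyAndTenureFixedCOWArray.

    #include <cstdio>

    enum Space { NEW_SPACE, OLD_SPACE };
    enum PretenureFlag { NOT_TENURED, TENURED };

    struct Elements {
      Space space;
      bool is_cow;  // copy-on-write, shared between literals
    };

    // Hypothetical stand-in: produce an old-space copy of the array.
    static Elements CopyAndTenure(const Elements& e) {
      return Elements{OLD_SPACE, e.is_cow};
    }

    static Elements PrepareElements(Elements elements, PretenureFlag flag) {
      if (flag == TENURED && elements.is_cow && elements.space == NEW_SPACE) {
        elements = CopyAndTenure(elements);  // move the shared array first
      }
      return elements;
    }

    int main() {
      Elements e{NEW_SPACE, true};
      Elements prepared = PrepareElements(e, TENURED);
      std::printf("space=%s\n", prepared.space == OLD_SPACE ? "old" : "new");
      return 0;
    }
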
@@ -10028,7 +10163,7 @@ void HOptimizedGraphBuilder::VisitDeclarations(
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+ DeclareGlobalsStrictMode::encode(current_info()->strict_mode());
Add<HDeclareGlobals>(array, flags);
globals_.Clear();
}
@@ -10040,7 +10175,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_.Add(variable->name(), zone());
@@ -10350,12 +10485,13 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -10366,12 +10502,13 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
CallRuntime* call) {
ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ // We need to follow the evaluation order of full codegen.
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
HValue* value = Pop();
HValue* index = Pop();
- HValue* string = Pop();
Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
index, value);
Add<HSimulate>(call->id(), FIXED_SIMULATE);
@@ -10395,14 +10532,23 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
Add<HStoreNamedField>(object,
HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
value);
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.Else();
{
// Nothing to do in this case.
+ if (!ast_context()->IsEffect()) {
+ Push(value);
+ }
Add<HSimulate>(call->id(), FIXED_SIMULATE);
}
if_objectisvalue.End();
+ if (!ast_context()->IsEffect()) {
+ Drop(1);
+ }
return ast_context()->ReturnValue(value);
}
@@ -10477,9 +10623,9 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
- Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10487,9 +10633,9 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
// Fast support for StringCompare.
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
- Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -10497,9 +10643,38 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ PushArgumentsFromEnvironment(call->arguments()->length());
HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
- Drop(4);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* lo = Pop();
+ HValue* hi = Pop();
+ HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
return ast_context()->ReturnInstruction(result, call->id());
}
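
The new %_DoubleLo, %_DoubleHi and %_ConstructDouble intrinsics expose the two 32-bit halves of an IEEE-754 double and rebuild a double from them. A plain C++ sketch of the same bit manipulation, assuming the usual 64-bit representation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Split a double into its high and low 32-bit words, and rebuild it,
    // mirroring the hi/lo split used by the intrinsics above.
    static uint32_t DoubleLo(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits & 0xffffffffu);
    }

    static uint32_t DoubleHi(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    static double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }

    int main() {
      double x = 1.5;
      double y = ConstructDouble(DoubleHi(x), DoubleLo(x));
      std::printf("%g == %g\n", x, y);  // round-trips exactly
      return 0;
    }
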
@@ -10540,12 +10715,11 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
- for (int i = 0; i < arg_count; ++i) {
- CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
- }
- CHECK_ALIVE(VisitForValue(call->arguments()->last()));
-
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+  // The function is the last argument.
HValue* function = Pop();
+  // Push the arguments to the stack.
+ PushArgumentsFromEnvironment(arg_count);
IfBuilder if_is_jsfunction(this);
if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
@@ -10554,7 +10728,6 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
{
HInstruction* invoke_result =
Add<HInvokeFunction>(function, arg_count);
- Drop(arg_count);
if (!ast_context()->IsEffect()) {
Push(invoke_result);
}
@@ -10565,7 +10738,6 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
{
HInstruction* call_result =
Add<HCallFunction>(function, arg_count);
- Drop(arg_count);
if (!ast_context()->IsEffect()) {
Push(call_result);
}
@@ -10945,7 +11117,10 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
if (info->IsOptimizing()) {
Handle<String> name = info->function()->debug_name();
PrintStringProperty("name", name->ToCString().get());
- PrintStringProperty("method", name->ToCString().get());
+ PrintIndent();
+ trace_.Add("method \"%s:%d\"\n",
+ name->ToCString().get(),
+ info->optimization_id());
} else {
CodeStub::Major major_key = info->code_stub()->MajorKey();
PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -11059,14 +11234,22 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
- instruction->position() : 0;
int uses = instruction->UseCount();
PrintIndent();
- trace_.Add("%d %d ", bci, uses);
+ trace_.Add("0 %d ", uses);
instruction->PrintNameTo(&trace_);
trace_.Add(" ");
instruction->PrintTo(&trace_);
+ if (FLAG_hydrogen_track_positions &&
+ instruction->has_position() &&
+ instruction->position().raw() != 0) {
+ const HSourcePosition pos = instruction->position();
+ trace_.Add(" pos:");
+ if (pos.inlining_id() != 0) {
+ trace_.Add("%d_", pos.inlining_id());
+ }
+ trace_.Add("%d", pos.position());
+ }
trace_.Add(" <|@\n");
}
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index b8344ef9c..6d81307e2 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -110,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr, int position);
+ void AddInstruction(HInstruction* instr, HSourcePosition position);
bool Dominates(HBasicBlock* other) const;
bool EqualToOrDominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -137,7 +137,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
- int position,
+ HSourcePosition position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
AddInstruction(instr, position);
@@ -174,6 +174,8 @@ class HBasicBlock V8_FINAL : public ZoneObject {
dominates_loop_successors_ = true;
}
+ void MarkSuccEdgeUnreachable(int succ);
+
inline Zone* zone() const;
#ifdef DEBUG
@@ -184,13 +186,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
friend class HGraphBuilder;
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
- void Finish(HControlInstruction* last, int position);
- void FinishExit(HControlInstruction* instruction, int position);
+ void Finish(HControlInstruction* last, HSourcePosition position);
+ void FinishExit(HControlInstruction* instruction, HSourcePosition position);
void Goto(HBasicBlock* block,
- int position,
+ HSourcePosition position,
FunctionState* state = NULL,
bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block, int position) {
+ void GotoNoSimulate(HBasicBlock* block, HSourcePosition position) {
Goto(block, position, NULL, false);
}
@@ -198,7 +200,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
FunctionState* state,
- int position);
+ HSourcePosition position);
private:
void RegisterPredecessor(HBasicBlock* pred);
@@ -469,6 +471,16 @@ class HGraph V8_FINAL : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+ // If we are tracking source positions then this function assigns a unique
+ // identifier to each inlining and dumps function source if it was inlined
+ // for the first time during the current optimization.
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ HSourcePosition position);
+
+ // Converts given HSourcePosition to the absolute offset from the start of
+ // the corresponding script.
+ int SourcePositionToScriptPosition(HSourcePosition position);
+
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -514,6 +526,23 @@ class HGraph V8_FINAL : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
+ class InlinedFunctionInfo {
+ public:
+ explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
+ : shared_(shared), start_position_(shared->start_position()) {
+ }
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ int start_position() const { return start_position_; }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ int start_position_;
+ };
+
+ int next_inline_id_;
+ ZoneList<InlinedFunctionInfo> inlined_functions_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -880,7 +909,8 @@ class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ int inlining_id);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
@@ -910,6 +940,8 @@ class FunctionState V8_FINAL {
bool arguments_pushed() { return arguments_elements() != NULL; }
+ int inlining_id() const { return inlining_id_; }
+
private:
HOptimizedGraphBuilder* owner_;
@@ -939,6 +971,9 @@ class FunctionState V8_FINAL {
HArgumentsObject* arguments_object_;
HArgumentsElements* arguments_elements_;
+ int inlining_id_;
+ HSourcePosition outer_source_position_;
+
FunctionState* outer_;
};
@@ -996,6 +1031,8 @@ class HAllocationMode V8_FINAL BASE_EMBEDDED {
: current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
explicit HAllocationMode(PretenureFlag pretenure_flag)
: current_site_(NULL), pretenure_flag_(pretenure_flag) {}
+ HAllocationMode()
+ : current_site_(NULL), pretenure_flag_(NOT_TENURED) {}
HValue* current_site() const { return current_site_; }
Handle<AllocationSite> feedback_site() const { return feedback_site_; }
@@ -1022,7 +1059,8 @@ class HGraphBuilder {
: info_(info),
graph_(NULL),
current_block_(NULL),
- position_(RelocInfo::kNoPosition) {}
+ position_(HSourcePosition::Unknown()),
+ start_position_(0) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1052,7 +1090,7 @@ class HGraphBuilder {
HBasicBlock* target,
FunctionState* state = NULL,
bool add_simulate = true) {
- from->Goto(target, position_, state, add_simulate);
+ from->Goto(target, source_position(), state, add_simulate);
}
void Goto(HBasicBlock* target,
FunctionState* state = NULL,
@@ -1068,7 +1106,7 @@ class HGraphBuilder {
void AddLeaveInlined(HBasicBlock* block,
HValue* return_value,
FunctionState* state) {
- block->AddLeaveInlined(return_value, state, position_);
+ block->AddLeaveInlined(return_value, state, source_position());
}
void AddLeaveInlined(HValue* return_value, FunctionState* state) {
return AddLeaveInlined(current_block(), return_value, state);
@@ -1274,8 +1312,6 @@ class HGraphBuilder {
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
- int position() const { return position_; }
-
protected:
virtual bool BuildGraph() = 0;
@@ -1294,7 +1330,7 @@ class HGraphBuilder {
HValue* length,
HValue* key,
bool is_js_array,
- bool is_store);
+ PropertyAccessType access_type);
HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
@@ -1336,6 +1372,10 @@ class HGraphBuilder {
HValue* dst_offset,
String::Encoding dst_encoding,
HValue* length);
+
+  // Align an object size to the object alignment boundary.
+ HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size);
+
// Both operands are non-empty strings.
HValue* BuildUncheckedStringAdd(HValue* left,
HValue* right,
@@ -1351,7 +1391,7 @@ class HGraphBuilder {
HValue* val,
bool is_js_array,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
@@ -1361,11 +1401,9 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store,
+ PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
- HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
- HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
HInstruction* AddLoadStringInstanceType(HValue* string);
HInstruction* AddLoadStringLength(HValue* string);
HStoreNamedField* AddStoreMapNoWriteBarrier(HValue* object, HValue* map) {
@@ -1404,8 +1442,7 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(const char* reason,
- HBasicBlock* continuation);
+ void FinishExitWithHardDeoptimization(const char* reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1777,6 +1814,27 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
ASSERT(position != RelocInfo::kNoPosition);
+ position_.set_position(position - start_position_);
+ }
+
+ void EnterInlinedSource(int start_position, int id) {
+ if (FLAG_hydrogen_track_positions) {
+ start_position_ = start_position;
+ position_.set_inlining_id(id);
+ }
+ }
+
+ // Convert the given absolute offset from the start of the script to
+ // the HSourcePosition assuming that this position corresponds to the
+ // same function as current position_.
+ HSourcePosition ScriptPositionToSourcePosition(int position) {
+ HSourcePosition pos = position_;
+ pos.set_position(position - start_position_);
+ return pos;
+ }
+
+ HSourcePosition source_position() { return position_; }
+ void set_source_position(HSourcePosition position) {
position_ = position;
}
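
The added helpers track positions as an offset relative to the current function's start plus an inlining id, and ScriptPositionToSourcePosition converts an absolute script offset into that form. A small self-contained sketch of the same bookkeeping with plain ints; the class and field names here are illustrative.

    #include <cstdio>

    // A source position is an offset relative to the enclosing function's
    // start, tagged with the id of the inlining it belongs to.
    struct SourcePosition {
      int inlining_id;
      int position;  // offset from the function's start position
    };

    class PositionTracker {
     public:
      PositionTracker() : start_position_(0), current_{0, 0} {}

      // Called when the builder starts emitting code for an inlined function.
      void EnterInlinedSource(int start_position, int inlining_id) {
        start_position_ = start_position;
        current_.inlining_id = inlining_id;
      }

      // Analogue of SetSourcePosition: record an absolute script offset.
      void SetSourcePosition(int script_position) {
        current_.position = script_position - start_position_;
      }

      // Analogue of ScriptPositionToSourcePosition: same conversion, but
      // returning a value instead of mutating the current position.
      SourcePosition FromScriptPosition(int script_position) const {
        return SourcePosition{current_.inlining_id,
                              script_position - start_position_};
      }

     private:
      int start_position_;
      SourcePosition current_;
    };

    int main() {
      PositionTracker tracker;
      tracker.EnterInlinedSource(/*start_position=*/120, /*inlining_id=*/3);
      SourcePosition pos = tracker.FromScriptPosition(157);
      std::printf("%d_%d\n", pos.inlining_id, pos.position);  // prints 3_37
      return 0;
    }
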
@@ -1796,9 +1854,6 @@ class HGraphBuilder {
HValue* mask,
int current_probe);
- void PadEnvironmentForContinuation(HBasicBlock* from,
- HBasicBlock* continuation);
-
template <class I>
I* AddInstructionTyped(I* instr) {
return I::cast(AddInstruction(instr));
@@ -1807,7 +1862,8 @@ class HGraphBuilder {
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
- int position_;
+ HSourcePosition position_;
+ int start_position_;
};
@@ -2059,9 +2115,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
- StrictModeFlag function_strict_mode_flag() {
- return function_state()->compilation_info()->is_classic_mode()
- ? kNonStrictMode : kStrictMode;
+ StrictMode function_strict_mode() {
+ return function_state()->compilation_info()->strict_mode();
}
// Generators for inline runtime functions.
@@ -2069,7 +2124,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void Generate##Name(CallRuntime* call);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void VisitDelete(UnaryOperation* expr);
@@ -2164,11 +2219,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression and emit a push to the outgoing arguments.
- void VisitArgument(Expression* expr);
-
- void VisitArgumentList(ZoneList<Expression*>* arguments);
-
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
@@ -2187,8 +2237,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
private:
- enum PropertyAccessType { LOAD, STORE };
-
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -2196,7 +2244,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
};
GlobalPropertyAccess LookupGlobalProperty(Variable* var,
LookupResult* lookup,
- bool is_store);
+ PropertyAccessType access_type);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2213,7 +2261,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* implicit_return_value,
BailoutId ast_id,
BailoutId return_id,
- InliningKind inlining_kind);
+ InliningKind inlining_kind,
+ HSourcePosition position);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2277,13 +2326,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
SmallMapList* types,
Handle<String> name);
- void VisitTypedArrayInitialize(CallRuntime* expr);
+ HValue* BuildAllocateExternalElements(
+ ExternalArrayType array_type,
+ bool is_zero_byte_offset,
+ HValue* buffer, HValue* byte_offset, HValue* length);
+ HValue* BuildAllocateFixedTypedArray(
+ ExternalArrayType array_type, size_t element_size,
+ ElementsKind fixed_elements_kind,
+ HValue* byte_length, HValue* length);
bool IsCallNewArrayInlineable(CallNew* expr);
void BuildInlinedCallNewArray(CallNew* expr);
- void VisitDataViewInitialize(CallRuntime* expr);
-
class PropertyAccessInfo {
public:
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
@@ -2416,23 +2470,27 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
- HControlInstruction* BuildCompareInstruction(Token::Value op,
- HValue* left,
- HValue* right,
- Type* left_type,
- Type* right_type,
- Type* combined_type,
- int left_position,
- int right_position,
- BailoutId bailout_id);
-
- HInstruction* BuildStringCharCodeAt(HValue* string,
- HValue* index);
enum PushBeforeSimulateBehavior {
PUSH_BEFORE_SIMULATE,
NO_PUSH_BEFORE_SIMULATE
};
+
+ HControlInstruction* BuildCompareInstruction(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Type* left_type,
+ Type* right_type,
+ Type* combined_type,
+ HSourcePosition left_position,
+ HSourcePosition right_position,
+ PushBeforeSimulateBehavior push_sim_result,
+ BailoutId bailout_id);
+
+ HInstruction* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
+
HValue* BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
@@ -2440,8 +2498,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
PushBeforeSimulateBehavior push_sim_result);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildLoadKeyedGeneric(HValue* object,
- HValue* key);
+ HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+ HValue* object,
+ HValue* key,
+ HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2455,14 +2515,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps,
- bool is_store,
+ PropertyAccessType access_type,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2470,12 +2530,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key,
HValue* val,
Expression* expr,
- bool is_store,
+ PropertyAccessType access_type,
bool* has_side_effects);
- HInstruction* BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- bool is_uninitialized = false);
+ HInstruction* BuildNamedGeneric(PropertyAccessType access,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2499,16 +2561,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
BailoutId return_id,
bool is_uninitialized = false);
+ HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
+ HValue* checked_object);
HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
HValue* checked_object,
HValue* value);
- HInstruction* BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized = false);
- HInstruction* BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value);
HValue* BuildContextChainWalk(Variable* var);
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 5c97c6b8e..d5ea77dbd 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -163,7 +163,7 @@ void SetResolvedDateSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
@@ -173,7 +173,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("calendar")),
isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)),
NONE,
- kNonStrictMode);
+ SLOPPY);
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
@@ -188,7 +188,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
isolate->factory()->NewStringFromAscii(CStrVector("UTC")),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
@@ -199,7 +199,7 @@ void SetResolvedDateSettings(Isolate* isolate,
canonical_time_zone.getBuffer()),
canonical_time_zone.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -216,14 +216,14 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->NewStringFromAscii(CStrVector(ns)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
delete numbering_system;
@@ -238,7 +238,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -246,7 +246,7 @@ void SetResolvedDateSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -389,7 +389,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
pattern.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
@@ -402,7 +402,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
reinterpret_cast<const uint16_t*>(currency.getBuffer()),
currency.length())),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
@@ -418,14 +418,14 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->NewStringFromAscii(CStrVector(ns)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
isolate->factory()->undefined_value(),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
delete numbering_system;
@@ -434,7 +434,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -443,7 +443,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumIntegerDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -452,7 +452,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
@@ -461,7 +461,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMaximumFractionDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
@@ -473,7 +473,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMinimumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
key = isolate->factory()->NewStringFromAscii(
@@ -486,7 +486,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewNumberFromInt(
number_format->getMaximumSignificantDigits()),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
// Set the locale
@@ -500,7 +500,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -508,7 +508,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -589,7 +589,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
NONE,
- kNonStrictMode);
+ SLOPPY);
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
@@ -598,7 +598,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("lower")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_UPPER_FIRST:
JSObject::SetProperty(
@@ -606,7 +606,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("upper")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
default:
JSObject::SetProperty(
@@ -614,7 +614,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
isolate->factory()->NewStringFromAscii(CStrVector("false")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
@@ -624,7 +624,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("primary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
@@ -633,14 +633,14 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("case")),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("base")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
break;
}
@@ -650,13 +650,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("accent")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
@@ -664,13 +664,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
@@ -680,13 +680,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
break;
default:
JSObject::SetProperty(
@@ -694,13 +694,13 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("strength")),
isolate->factory()->NewStringFromAscii(CStrVector("identical")),
NONE,
- kNonStrictMode);
+ SLOPPY);
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
isolate->factory()->NewStringFromAscii(CStrVector("variant")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
JSObject::SetProperty(
@@ -709,7 +709,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->ToBoolean(collator->getAttribute(
UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
NONE,
- kNonStrictMode);
+ SLOPPY);
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
@@ -722,7 +722,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -730,7 +730,7 @@ void SetResolvedCollatorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
@@ -785,7 +785,7 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector(result)),
NONE,
- kNonStrictMode);
+ SLOPPY);
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(
@@ -793,7 +793,7 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
isolate->factory()->NewStringFromAscii(CStrVector("locale")),
isolate->factory()->NewStringFromAscii(CStrVector("und")),
NONE,
- kNonStrictMode);
+ SLOPPY);
}
}
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index ee5d991e3..8022f0592 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -85,7 +85,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -97,13 +97,19 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -196,28 +202,28 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
+ return Assembler::target_address_at(pc_ + 1, host_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
+ Assembler::set_target_address_at(pc_ + 1, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -248,7 +254,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -439,12 +445,15 @@ void Assembler::emit_w(const Immediate& x) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
CPU::FlushICache(p, sizeof(int32_t));
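
The two ia32 definitions above gain an explicit constant-pool parameter but still ignore it and resolve targets pc-relatively. A minimal standalone sketch of that rel32 arithmetic (plain C++, not V8 code; the buffer, names and offsets are invented for illustration):

#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch only: the four bytes at pc hold a signed displacement, so the
// target is pc + 4 + displacement, and storing a target writes
// target - (pc + 4), as in the ia32 helpers above.
typedef unsigned char* Address;

static Address target_address_at(Address pc) {
  int32_t displacement;
  std::memcpy(&displacement, pc, sizeof(displacement));
  return pc + sizeof(int32_t) + displacement;
}

static void set_target_address_at(Address pc, Address target) {
  int32_t displacement =
      static_cast<int32_t>(target - (pc + sizeof(int32_t)));
  std::memcpy(pc, &displacement, sizeof(displacement));
}

int main() {
  unsigned char code[64] = {0};
  Address pc = code + 8;       // pretend a call's rel32 field starts here
  Address target = code + 40;  // pretend branch target in the same buffer
  set_target_address_at(pc, target);
  assert(target_address_at(pc) == target);
  return 0;
}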
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 733432028..3a4f590c8 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -160,6 +160,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
for (int i = 0; i < instruction_count; i++) {
@@ -1259,6 +1264,14 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsr(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBD);
+ emit_operand(dst, src);
+}
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
@@ -2555,7 +2568,7 @@ void Assembler::RecordComment(const char* msg, bool force) {
void Assembler::GrowBuffer() {
- ASSERT(overflow());
+ ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
@@ -2614,7 +2627,7 @@ void Assembler::GrowBuffer() {
}
}
- ASSERT(!overflow());
+ ASSERT(!buffer_overflow());
}
@@ -2704,6 +2717,19 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 6ed0bc6d6..27e5302db 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -624,8 +624,21 @@ class Assembler : public AssemblerBase {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
+ inline static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ inline static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -634,8 +647,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static const int kSpecialTargetSize = kPointerSize;
@@ -882,6 +895,8 @@ class Assembler : public AssemblerBase {
void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
+ void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
+ void bsr(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1155,7 +1170,9 @@ class Assembler : public AssemblerBase {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+ inline bool buffer_overflow() const {
+ return pc_ >= reloc_info_writer.pos() - kGap;
+ }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1174,6 +1191,12 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1251,7 +1274,7 @@ class Assembler : public AssemblerBase {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
+ if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
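
The overflow() to buffer_overflow() rename above keeps the same guard pattern: every emit routine opens an EnsureSpace scope that grows the buffer when fewer than kGap bytes remain. A stripped-down sketch of that pattern (illustrative only; MiniAssembler and its sizes are made up, not V8 classes):

#include <cstddef>
#include <vector>

class MiniAssembler {
 public:
  explicit MiniAssembler(std::size_t capacity) : buffer_(capacity), pc_(0) {}
  // Fewer than kGap bytes left in the buffer.
  bool buffer_overflow() const { return buffer_.size() - pc_ < kGap; }
  void GrowBuffer() { buffer_.resize(buffer_.size() * 2); }
  void emit(unsigned char b) { buffer_[pc_++] = b; }

 private:
  static const std::size_t kGap = 32;
  std::vector<unsigned char> buffer_;
  std::size_t pc_;
};

class EnsureSpace {
 public:
  explicit EnsureSpace(MiniAssembler* assembler) {
    if (assembler->buffer_overflow()) assembler->GrowBuffer();
  }
};

int main() {
  MiniAssembler masm(64);
  for (int i = 0; i < 1000; ++i) {
    EnsureSpace ensure_space(&masm);  // grow before emitting, as above
    masm.emit(0x90);                  // nop
  }
  return 0;
}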
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index d748d2362..785c5fd61 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -115,7 +115,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -125,19 +125,32 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
+ // -- ebx: allocation site or undefined
// -----------------------------------
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ SmiTag(eax);
__ push(eax);
@@ -189,7 +202,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(edi); // constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(edi);
__ pop(eax);
@@ -202,20 +215,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
+ if (create_memento) {
+ __ add(edi, Immediate(AllocationMemento::kSize));
+ }
+
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
+ // edi: start of next object (including memento if create_memento)
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
__ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
__ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
// Set extra fields in the newly allocated object.
// eax: initial map
// ebx: JSObject
- // edi: start of next object
+ // edi: start of next object (including memento if create_memento)
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ mov(edx, factory->undefined_value());
if (count_constructions) {
@@ -231,8 +250,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+ } else if (create_memento) {
+ __ lea(esi, Operand(edi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // Fill in memento fields if necessary.
+ // esi: points to the allocated but uninitialized memento.
+ Handle<Map> allocation_memento_map = factory->allocation_memento_map();
+ __ mov(Operand(esi, AllocationMemento::kMapOffset),
+ allocation_memento_map);
+ // Get the cell or undefined.
+ __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
+ edx);
+ } else {
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
}
- __ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -323,16 +357,48 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
__ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 2));
+ __ push(edi);
+ offset = kPointerSize;
+ }
+
// Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
+ __ mov(edi, Operand(esp, offset));
// edi: function (constructor)
__ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(ebx, eax); // store result in ebx
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ mov(ecx, Operand(esp, kPointerSize * 2));
+ __ cmp(ecx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // ecx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
+ Immediate(Smi::FromInt(1)));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
__ pop(edi);
@@ -415,17 +481,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -434,7 +500,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
- __ Set(esi, Immediate(0));
+ __ Move(esi, Immediate(0));
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -456,7 +522,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
Label loop, entry;
- __ Set(ecx, Immediate(0));
+ __ Move(ecx, Immediate(0));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
@@ -473,9 +539,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -503,7 +567,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -518,7 +582,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(edi);
}
@@ -622,7 +686,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -654,7 +718,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -721,7 +785,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
- __ Set(edx, Immediate(0)); // indicate regular JS_FUNCTION
+ __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -737,7 +801,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
// Call ToObject on the receiver if it is not an object, or use the
@@ -761,7 +825,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
+ __ Move(edx, Immediate(0)); // restore
__ pop(eax);
__ SmiUntag(eax);
@@ -784,11 +848,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ Set(edx, Immediate(1)); // indicate function proxy
+ __ Move(edx, Immediate(1)); // indicate function proxy
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &shift_arguments);
__ bind(&non_function);
- __ Set(edx, Immediate(2)); // indicate non-function
+ __ Move(edx, Immediate(2)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -816,7 +880,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function, non_proxy;
__ test(edx, edx);
__ j(zero, &function);
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
__ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
@@ -923,7 +987,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
__ JumpIfSmi(ebx, &call_to_object);
@@ -994,7 +1058,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(edi); // add function proxy as last argument
__ inc(eax);
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -1057,10 +1121,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(ebx, Immediate(undefined_sentinel));
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1131,7 +1192,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Set properties and elements.
Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
+ __ Move(ecx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
@@ -1172,7 +1233,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
+ __ Move(ebx, Immediate(factory->empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
@@ -1358,7 +1419,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index e280c50e7..ab29167e9 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -50,7 +50,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -81,7 +81,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -92,7 +92,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -103,15 +104,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { ebx, edx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -146,7 +147,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -170,6 +171,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -217,7 +238,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -245,7 +266,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -368,7 +389,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -825,8 +846,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmp(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
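
The MathPowStub hunk above swaps an equality check against 0x80000000 for an overflow check after comparing with 1; both detect the "integer indefinite" value that cvttsd2si produces for NaN or out-of-range exponents. A standalone demonstration of that behaviour (assumes an SSE2-capable compiler; not part of the patch):

#include <cassert>
#include <cmath>
#include <emmintrin.h>

int main() {
  // cvttsd2si yields 0x80000000 (INT_MIN) for NaN and out-of-range input.
  int indefinite = _mm_cvttsd_si32(_mm_set_sd(std::nan("")));
  assert(indefinite == static_cast<int>(0x80000000u));
  int huge = _mm_cvttsd_si32(_mm_set_sd(1e30));
  assert(huge == static_cast<int>(0x80000000u));
  int normal = _mm_cvttsd_si32(_mm_set_sd(42.75));  // truncates toward zero
  assert(normal == 42);
  // INT_MIN is the only 32-bit value for which "cmp reg, 1" (reg - 1)
  // overflows, so j(overflow, ...) after the new cmp catches exactly it.
  return 0;
}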
@@ -1046,91 +1067,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- if (kind() == Code::KEYED_STORE_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -1190,7 +1126,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -1211,11 +1147,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ mov(Operand(esp, 2 * kPointerSize), edx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// esp[0] : return address
@@ -1275,7 +1211,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
+ __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -1293,7 +1229,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
__ jmp(&copy, Label::kNear);
__ bind(&has_mapped_parameters);
@@ -1330,7 +1266,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
// eax = address of new object (tagged)
@@ -1349,7 +1285,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate->factory()->non_strict_arguments_elements_map()));
+ Immediate(isolate->factory()->sloppy_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -1436,7 +1372,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ pop(eax); // Remove saved parameter count.
__ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -1475,7 +1411,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -1484,7 +1420,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
@@ -1510,7 +1446,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
+ __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate->factory()->fixed_array_map()));
@@ -1535,7 +1471,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -1544,7 +1480,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1607,7 +1543,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(above, &runtime);
// Reset offset for possibly sliced string.
- __ Set(edi, Immediate(0));
+ __ Move(edi, Immediate(0));
__ mov(eax, Operand(esp, kSubjectOffset));
__ JumpIfSmi(eax, &runtime);
__ mov(edx, eax); // Make a copy of the original subject string.
@@ -1701,7 +1637,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is one byte.
+ __ Move(ecx, Immediate(1)); // Type is one byte.
// (E) Carry on. String handling is done.
__ bind(&check_code);
@@ -1928,7 +1864,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1969,7 +1905,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
__ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Move(ecx, Immediate(0)); // Type is two byte.
__ jmp(&check_code); // Go to (E).
// (10) Not a string or a short external string? If yes, bail out to runtime.
@@ -2066,7 +2002,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
@@ -2081,7 +2017,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -2195,7 +2131,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(below, &below_label, Label::kNear);
__ j(above, &above_label, Label::kNear);
- __ Set(eax, Immediate(0));
+ __ Move(eax, Immediate(0));
__ ret(0);
__ bind(&below_label);
@@ -2287,7 +2223,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ j(zero, &return_unequal, Label::kNear);
// The objects are both undetectable, so they both compare as the value
// undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
+ __ Move(eax, Immediate(EQUAL));
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
@@ -2322,95 +2258,115 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : cache cell for call target
+ // ebx : Feedback vector
+ // edx : slot in feedback vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
// Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &miss);
-
- // Load the global or builtins object from the current context
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ __ j(equal, &done, Label::kFar);
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kFar);
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kFar);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(ebx);
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- __ pop(ebx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, Cell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ // We won't need edx or ebx anymore, just save edi
+ __ push(edi);
+ __ push(ebx);
+ __ push(edx);
+ __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(edx);
+ __ pop(ebx);
+ __ pop(edi);
__ bind(&done);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// edi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
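
GenerateRecordCallTarget above now indexes the feedback vector with a Smi slot index in edx scaled by times_half_pointer_size. A tiny standalone check of why that scale is correct on ia32 (illustrative arithmetic only, no V8 types):

#include <cassert>
#include <cstdint>

int main() {
  const std::intptr_t kPointerSize = 4;  // ia32
  for (std::intptr_t slot = 0; slot < 16; ++slot) {
    // A 32-bit Smi stores its value shifted left by one (tag bit is 0), so
    // scaling the tagged value by half a pointer gives slot * kPointerSize,
    // the byte offset of a pointer-sized FixedArray element.
    std::intptr_t smi = slot << 1;
    assert(smi * (kPointerSize / 2) == slot * kPointerSize);
  }
  return 0;
}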
@@ -2425,6 +2381,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in ebx we need
+ // to set ebx to undefined.
+ __ mov(ebx, Immediate(isolate->factory()->undefined_value()));
}
}
@@ -2468,9 +2428,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ // object (megamorphic symbol) so no write barrier is needed.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -2478,8 +2439,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ pop(ecx);
__ push(edi); // put proxy as additional argument under return address
__ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
+ __ Move(eax, Immediate(argc_ + 1));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
@@ -2490,8 +2451,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// of the original receiver from the call site).
__ bind(&non_function);
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
+ __ Move(eax, Immediate(argc_));
+ __ Move(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -2514,7 +2475,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
- // ebx : cache cell for call target
+ // ebx : feedback vector
+ // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// edi : constructor function
Label slow, non_function_call;
@@ -2526,6 +2489,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into ebx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by edx + 1.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(ebx);
}
// Jump to the function-specific construct stub.
@@ -2550,7 +2534,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
+ __ Move(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
@@ -2600,23 +2584,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ mov(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, 0xf);
- __ cmp(scratch, 0xf);
- __ j(equal, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -2711,15 +2681,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, Label::kNear);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
// Clear the pending exception.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
@@ -2763,13 +2727,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -2777,7 +2739,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -2787,26 +2748,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, eax, ecx, &already_have_failure);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
- __ bind(&already_have_failure);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, eax);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(eax);
@@ -3041,7 +2990,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
+ __ Move(eax, Immediate(0));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3061,7 +3010,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -3077,20 +3026,20 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Null is not instance of anything.
__ cmp(object, factory->null_value());
__ j(not_equal, &object_not_null, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values is not instance of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values is not instance of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ Move(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
@@ -3187,7 +3136,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -3213,7 +3162,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3238,7 +3187,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
+ __ Move(result_, Immediate(factory->single_character_string_cache()));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
@@ -3609,7 +3558,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3637,7 +3586,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
__ cmp(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
__ bind(&strings_not_equal);
- __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
__ ret(0);
// Check if the length is zero.
@@ -3646,7 +3595,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
__ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Compare characters.
@@ -3655,7 +3604,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
&strings_not_equal, Label::kNear);
// Characters are equal.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
}
@@ -3703,7 +3652,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
Label result_greater;
@@ -3716,12 +3665,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ bind(&result_less);
// Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ Move(eax, Immediate(Smi::FromInt(LESS)));
__ ret(0);
// Result is GREATER.
__ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ Move(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(0);
}
@@ -3772,7 +3721,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
__ ret(2 * kPointerSize);
@@ -3791,7 +3740,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4178,7 +4127,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4223,7 +4172,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
__ j(not_equal, &done, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ bind(&done);
__ ret(0);
@@ -4269,7 +4218,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
// Handle not identical strings.
@@ -4314,7 +4263,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4676,7 +4625,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4691,13 +4640,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
masm,
kReturnOnNoNeedToInformIncrementalMarker,
mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -4707,18 +4656,11 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
Immediate(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5137,15 +5079,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
- // -- ebx : type info cell
+ // -- ebx : AllocationSite or undefined
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -5158,25 +5096,15 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in ebx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(ebx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ cmp(ebx, Immediate(undefined_sentinel));
+ // If the feedback vector is the undefined value, call an array constructor
+ // that doesn't use AllocationSites.
+ __ cmp(ebx, masm->isolate()->factory()->undefined_value());
__ j(equal, &no_info);
- __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(ebx, 0), Immediate(
- masm->isolate()->factory()->allocation_site_map()));
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
__ mov(edx, FieldOperand(ebx, AllocationSite::kTransitionInfoOffset));
@@ -5229,7 +5157,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
- // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
@@ -5301,7 +5228,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = esi;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5370,9 +5297,9 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), scratch);
// FunctionCallbackInfo::length_.
- __ Set(ApiParameterOperand(4), Immediate(argc));
+ __ Move(ApiParameterOperand(4), Immediate(argc));
// FunctionCallbackInfo::is_construct_call_.
- __ Set(ApiParameterOperand(5), Immediate(0));
+ __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5382,15 +5309,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Operand context_restore_operand(ebp,
(2 + FCA::kContextSaveIndex) * kPointerSize);
- Operand return_value_operand(ebp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ Operand return_value_operand(ebp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_address,
ApiParameterOperand(1),
argc + FCA::kArgsLength + 1,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
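For the is_store branch just above, a plain-C++ restatement of the offset choice may help (a sketch only; kArgsLength and kReturnValueOffset stand in for the FunctionCallbackArguments constants, and the +2 skips the saved frame pointer and return address):

// Store callbacks must hand back the value that was written, which is the
// first JS argument sitting just past the implicit FunctionCallbackInfo
// slots; everything else reads the regular return-value slot.
int ReturnValueSlotIndex(bool is_store, int kArgsLength, int kReturnValueOffset) {
  return is_store ? (2 + kArgsLength) : (2 + kReturnValueOffset);
}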
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index e383a9d7e..cf20a11c6 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -29,7 +29,6 @@
#define V8_IA32_CODE_STUBS_IA32_H_
#include "macro-assembler.h"
-#include "code-stubs.h"
#include "ic-inl.h"
namespace v8 {
@@ -428,7 +427,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 76a7003bf..42284ec75 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -138,7 +138,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ Set(eax, Immediate(0)); // No arguments.
+ __ Move(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
@@ -154,7 +154,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
int r = JSCallerSavedCode(i);
Register reg = { r };
if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
+ __ Move(reg, Immediate(kDebugZapValue));
}
bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
@@ -280,10 +280,12 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: slot in feedback array
// -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ 0, false);
}
@@ -306,11 +308,13 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
+ // -- ebx: feedback array
+ // -- edx: feedback slot (smi)
// -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
+ eax.bit(), false);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 5300dde9a..711cdf86f 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -116,6 +116,27 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
+
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -124,9 +145,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address reloc_end_address = reloc_info->address() + reloc_info->Size();
RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
-
// Since the call is a relative encoding, write new
// reloc info. We do not need any of the existing reloc info because the
// existing code will not be used again (we zap it in debug builds).
@@ -134,9 +152,14 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Emit call to lazy deoptimization at all lazy deopt points.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
@@ -440,6 +463,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 6a7f3bc83..e50a78e34 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -407,10 +407,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
@@ -434,23 +435,30 @@ int DisassemblerIA32::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale,
- disp);
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ (this->*register_name)(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
}
break;
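The effect of the disassembler change above is easiest to see with a concrete byte: reading the 8-bit displacement through int8_t sign-extends it, and the explicit sign keeps the output readable. A small standalone sketch (illustrative only, not the disassembler's own helper):

#include <cstdint>
#include <cstdio>

static void PrintMemOperand(const char* reg, int disp) {
  std::printf("[%s%s0x%x]\n", reg, disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}

int main() {
  uint8_t encoded = 0xF8;                   // one-byte displacement from the instruction stream
  int disp = static_cast<int8_t>(encoded);  // sign-extend: -8
  PrintMemOperand("eax", disp);             // prints [eax-0x8]; the old code showed [eax+0xf8]
  return 0;
}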
@@ -881,6 +889,7 @@ static const char* F0Mnem(byte f0byte) {
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
+ case 0xBD: return "bsr";
default: return NULL;
}
}
@@ -1096,22 +1105,26 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += SetCC(data);
} else if ((f0byte & 0xF0) == 0x40) {
data += CMov(data);
- } else {
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
} else {
- UnimplementedInstruction();
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
}
}
break;
@@ -1606,13 +1619,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
+ } else if (b2 == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
+ } else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index e0f3e32f7..2d6145eea 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -84,6 +84,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index f3125666f..70a968e8a 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -101,6 +101,25 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = esp) {
+ Label ok;
+ Isolate* isolate = masm_->isolate();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate);
+ ASSERT(scratch.is(esp) == (pointers == 0));
+ if (pointers != 0) {
+ __ mov(scratch, esp);
+ __ sub(scratch, Immediate(pointers * kPointerSize));
+ }
+ __ cmp(scratch, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
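When EmitStackCheck is given a non-zero pointer count it probes the stack position the function is about to grow into rather than the current esp, so a large locals area is validated before a single push happens. A rough restatement of that comparison in plain C++ (a sketch under assumed names, not V8 code):

#include <cstdint>

// Returns true when the pushes can proceed without calling the StackCheck
// builtin: the future stack top must stay at or above the isolate's limit.
bool FutureStackTopIsSafe(uintptr_t esp, uintptr_t stack_limit,
                          int pointers, int kPointerSize = 4) {
  uintptr_t probe = esp - static_cast<uintptr_t>(pointers) * kPointerSize;
  return probe >= stack_limit;  // mirrors the above_equal branch to &ok
}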
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -118,6 +137,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,10 +154,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -168,8 +190,26 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, ecx);
+ }
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(ecx, loop_iterations);
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ push(eax);
+ }
+ __ dec(ecx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
__ push(eax);
}
}
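The split between the ecx-counted loop and the straight-line tail above is just integer division: for example, 100 locals become three loop iterations of 32 pushes plus 4 trailing pushes, and the EmitStackCheck pre-probe only triggers from 128 locals upward. A tiny worked sketch (illustrative numbers only):

#include <cstdio>

int main() {
  const int kMaxPushes = 32;
  int locals_count = 100;                            // hypothetical function
  int loop_iterations = locals_count / kMaxPushes;   // 3 iterations of 32 pushes
  int remaining = locals_count % kMaxPushes;         // 4 leftover pushes
  std::printf("%d*%d + %d pushes\n", loop_iterations, kMaxPushes, remaining);
  return 0;
}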
@@ -185,13 +225,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -242,12 +282,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -273,7 +313,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -282,13 +322,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_);
}
{ Comment cmnt(masm_, "[ Body");
@@ -308,7 +342,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
+ __ Move(eax, Immediate(Smi::FromInt(0)));
}
@@ -467,9 +501,9 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
if (lit->IsSmi()) {
- __ SafeSet(result_register(), Immediate(lit));
+ __ SafeMove(result_register(), Immediate(lit));
} else {
- __ Set(result_register(), Immediate(lit));
+ __ Move(result_register(), Immediate(lit));
}
}
@@ -626,7 +660,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -743,7 +777,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -790,7 +824,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -843,7 +877,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ push(Immediate(variable->name()));
__ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -913,7 +947,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(esi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -921,7 +955,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -977,7 +1011,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1021,6 +1055,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
+
SetStatementPosition(stmt);
Label loop, exit;
@@ -1099,20 +1135,22 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
+ Immediate(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
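The store into the feedback vector above deliberately skips RecordWriteField: the slow-case marker is a Smi, and write barriers only ever need to record tagged heap pointers. A minimal sketch of that filter (assumed helper, not a V8 API):

// On ia32 a Smi has a clear low bit, while heap object pointers carry a
// heap-object tag of 1, so storing an immediate Smi can never create a
// pointer the garbage collector would have to track.
inline bool StoreNeedsWriteBarrier(intptr_t tagged_word) {
  const intptr_t kSmiTagMask = 1;
  return (tagged_word & kSmiTagMask) != 0;  // heap object -> may need recording
}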
@@ -1260,7 +1298,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
@@ -1269,7 +1307,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ push(Immediate(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(eax);
}
@@ -1290,7 +1328,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1304,7 +1342,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1349,7 +1387,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
@@ -1386,16 +1424,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1412,7 +1449,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(edx, GlobalObjectOperand());
@@ -1425,9 +1462,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1459,7 +1495,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1469,14 +1505,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@@ -1489,15 +1525,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ push(esi); // Context.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1528,7 +1564,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->pattern()));
__ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -1540,7 +1576,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(ebx);
__ bind(&allocated);
@@ -1581,8 +1617,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1590,7 +1625,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1633,7 +1668,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->value()));
__ mov(edx, Operand(esp, 0));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1743,7 +1778,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1814,13 +1849,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1960,7 +1991,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(esp, ebx);
__ j(equal, &post_runtime);
__ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
@@ -2028,7 +2059,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, esi);
__ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(eax); // result
@@ -2047,7 +2078,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_call);
__ mov(edx, Operand(esp, kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2082,7 +2113,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in eax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2162,7 +2193,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2176,14 +2207,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(eax);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(ebx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2201,7 +2232,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2237,7 +2268,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2258,8 +2289,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2269,10 +2299,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
switch (op) {
case Token::SAR:
- __ SmiUntag(eax);
__ SmiUntag(ecx);
__ sar_cl(eax); // No checks of result necessary
- __ SmiTag(eax);
+ __ and_(eax, Immediate(~kSmiTagMask));
break;
case Token::SHL: {
Label result_ok;
@@ -2344,20 +2373,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2383,7 +2406,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->value());
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2393,7 +2416,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(ecx, eax);
__ pop(edx); // Receiver.
__ pop(eax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2404,44 +2427,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ mov(location, eax);
+ if (var->IsContextSlot()) {
+ __ mov(edx, eax);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(eax); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(name));
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2450,20 +2487,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2471,20 +2506,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2504,7 +2526,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2520,10 +2542,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(edx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
@@ -2552,10 +2574,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2576,7 +2596,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2668,15 +2688,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2696,13 +2716,13 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
// Push the start position of the scope the calls resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2718,8 +2738,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2769,7 +2789,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ push(eax); // Function.
__ push(edx); // Receiver.
@@ -2843,15 +2863,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
+ __ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ LoadHeapObject(ebx, FeedbackVector());
+ __ mov(edx, Immediate(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
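The ASSERT above fixes the slot layout for a `new` site: the call feedback lives in slot k and, when FLAG_pretenuring_call_new is on, the freshly allocated AllocationSite sits in slot k + 1 of the same feedback vector. A small sketch of that convention (assumed structure, for illustration only):

struct CallNewFeedbackLayout {
  int call_feedback_slot;              // k: starts out as the uninitialized sentinel
  int AllocationSiteSlot() const {
    return call_feedback_slot + 1;     // k + 1, only populated when pretenuring
  }
};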
@@ -3108,9 +3135,11 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(eax, map, if_false, DO_SMI_CHECK);
- __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
- __ j(not_equal, if_false);
- __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ // Check if the exponent half is 0x80000000. Comparing against 1 and
+ // checking for overflow is the shortest possible encoding.
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
+ __ j(no_overflow, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
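The overflow trick above works because the upper word of -0.0 is exactly 0x80000000 (INT32_MIN), and subtracting 1 overflows a signed 32-bit value only for that input; any other exponent half leaves the overflow flag clear and jumps to if_false. A standalone check of the bit pattern (illustrative only):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double minus_zero = -0.0;
  uint64_t bits;
  std::memcpy(&bits, &minus_zero, sizeof bits);
  uint32_t exponent_half = static_cast<uint32_t>(bits >> 32);  // sign/exponent word
  uint32_t mantissa_half = static_cast<uint32_t>(bits);        // low mantissa word
  assert(exponent_half == 0x80000000u);  // sign bit set, everything else zero
  assert(mantissa_half == 0u);           // mantissa word is zero
  return 0;
}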
@@ -3227,7 +3256,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -3239,7 +3268,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
Label exit;
// Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
+ __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3331,7 +3360,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
@@ -3424,7 +3453,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(result);
}
@@ -3606,13 +3635,13 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
+ __ Move(result, Immediate(isolate()->factory()->nan_value()));
__ jmp(&done);
__ bind(&need_conversion);
// Move the undefined value into the result register, which will
// trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
+ __ Move(result, Immediate(isolate()->factory()->undefined_value()));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3654,13 +3683,13 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
+ __ Move(result, Immediate(isolate()->factory()->empty_string()));
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
__ jmp(&done);
NopRuntimeCallHelper call_helper;
@@ -3806,7 +3835,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ push(cache);
__ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(eax);
@@ -3911,8 +3940,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
+ __ Move(index, Immediate(0));
+ __ Move(string_length, Immediate(0));
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
@@ -4028,7 +4057,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_2_entry);
@@ -4065,7 +4094,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Long separator case (separator is more than one character).
__ bind(&long_separator);
- __ Set(index, Immediate(0));
+ __ Move(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_3_entry);
@@ -4116,8 +4145,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4181,20 +4210,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ push(Immediate(Smi::FromInt(strict_mode_flag)));
+ __ push(Immediate(Smi::FromInt(strict_mode())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
+ __ push(Immediate(Smi::FromInt(SLOPPY)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4207,7 +4234,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(eax);
}
} else {
@@ -4288,16 +4315,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4416,9 +4438,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4449,7 +4469,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->value());
__ pop(edx);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4463,10 +4483,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4488,7 +4508,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4497,6 +4517,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4506,7 +4527,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ push(esi);
__ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4655,7 +4676,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4691,7 +4712,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);
}
@@ -4881,6 +4902,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4898,20 +4920,22 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index bd6dcefe1..c2be7da1a 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -351,7 +351,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ j(not_zero, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -657,7 +657,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -682,7 +682,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -859,7 +859,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -947,8 +947,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -956,9 +955,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1064,17 +1061,14 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1136,7 +1130,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1157,7 +1151,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 5a12ca969..0dbe3da13 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -103,7 +103,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -175,11 +175,11 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -199,7 +199,7 @@ bool LCodeGen::GeneratePrologue() {
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -297,7 +297,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in eax. It replaces the context passed to us.
@@ -346,7 +346,7 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
// Move state of dynamic frame alignment into edx.
- __ Set(edx, Immediate(kNoAlignmentPadding));
+ __ Move(edx, Immediate(kNoAlignmentPadding));
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
@@ -390,6 +390,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -479,7 +482,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -954,10 +958,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1181,6 +1181,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1366,301 +1374,325 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ test(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ neg(dividend);
+ __ and_(dividend, mask);
+ __ neg(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(eax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(edx));
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, mask);
+ __ bind(&done);
+}
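
A standalone sketch of the masking trick DoModByPowerOf2I relies on (the C++ framing and name here are illustrative, not V8 code): for |divisor| == 2^k the remainder is just dividend & (2^k - 1), re-negated for negative dividends so the result keeps the dividend's sign, matching JS '%' semantics.

    #include <cstdint>

    int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
      // Same mask expression as the emitted code; also valid for divisor == INT32_MIN.
      uint32_t mask = static_cast<uint32_t>(divisor < 0 ? -(divisor + 1) : divisor - 1);
      if (dividend >= 0) return static_cast<int32_t>(dividend & mask);
      // neg / and / neg, done on uint32_t so dividend == INT32_MIN stays defined.
      uint32_t magnitude = (0u - static_cast<uint32_t>(dividend)) & mask;
      return -static_cast<int32_t>(magnitude);
    }
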
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmp(left_reg, kMinInt);
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ cmp(right_reg, -1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(eax));
- // Sign extend dividend in eax into edx:eax.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
- __ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imul(edx, edx, Abs(divisor));
+ __ mov(eax, dividend);
+ __ sub(eax, edx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmp(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
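
DoModByConstI derives the remainder from a truncating division: q = TruncatingDiv(x, |d|), r = x - q * |d|. A rough sketch of that identity, with a plain '/' standing in for TruncatingDiv's multiply-by-reciprocal (the helper name and bool-return shape are illustrative):

    #include <cstdint>

    // Returns false where the generated code deoptimizes.
    bool ModByConst(int32_t dividend, int32_t divisor, bool bailout_on_minus_zero,
                    int32_t* result) {
      if (divisor == 0) return false;  // unconditional deopt above
      int64_t abs_divisor = divisor < 0 ? -static_cast<int64_t>(divisor) : divisor;
      int32_t quotient = static_cast<int32_t>(dividend / abs_divisor);  // TruncatingDiv
      int32_t remainder = static_cast<int32_t>(dividend - quotient * abs_divisor);
      // A zero remainder with a negative dividend would have to be -0 in JS.
      if (bailout_on_minus_zero && remainder == 0 && dividend < 0) return false;
      *result = remainder;
      return true;
    }
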
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(eax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(eax));
+ ASSERT(!right_reg.is(edx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(edx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(right_reg, Operand(right_reg));
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ cmp(right_reg, -1);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Move(result_reg, Immediate(0));
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ test(left_reg, Operand(left_reg));
+ __ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
- __ bind(&done);
+ __ test(result_reg, Operand(result_reg));
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idiv(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ test(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sar(result, 31);
+ __ shr(result, 32 - shift);
+ __ add(result, dividend);
+ __ sar(result, shift);
+ }
+ if (divisor < 0) __ neg(result);
+}
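
DoDivByPowerOf2I is the usual truncating shift-based division: negative dividends are biased by |divisor| - 1 before the arithmetic shift so the quotient rounds toward zero, and the sign is flipped for negative divisors. A sketch under the usual two's-complement / arithmetic-right-shift assumptions (the kMinInt/-1 and exact-remainder deopts above are omitted):

    #include <cstdint>

    int32_t DivByPowerOf2(int32_t dividend, int32_t divisor, int shift) {
      // 'shift' is log2(|divisor|); INT32_MIN / -1 deoptimizes above and is excluded.
      uint32_t bias = 0;
      if (shift > 0) {
        // 0 for non-negative dividends, 2^shift - 1 for negative ones
        // (the sar-31 / shr-(32-shift) pair in the emitted code).
        bias = static_cast<uint32_t>(dividend >> 31) >> (32 - shift);
      }
      int32_t result =
          static_cast<int32_t>(static_cast<uint32_t>(dividend) + bias) >> shift;
      return divisor < 0 ? -result : result;
    }
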
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmp(dividend, 0);
- __ j(less, &negative, Label::kNear);
- __ sar(dividend, power);
- if (divisor < 0) __ neg(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ neg(dividend);
- __ sar(dividend, power);
- if (divisor > 0) __ neg(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
- }
- if (divisor < 0) __ neg(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(eax, edx);
+ __ imul(eax, eax, divisor);
+ __ sub(eax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
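
TruncatingDiv itself replaces the idiv with a multiply by a precomputed fixed-point reciprocal of the divisor; DoDivByConstI then fixes up the sign and, when the result must be exact, checks q * d == x and deoptimizes otherwise. As a flavour of the reciprocal idea, here is the textbook unsigned-by-3 case (signed dividends, which TruncatingDiv handles, need an extra sign correction on top of this):

    #include <cstdint>

    // 0xAAAAAAAB == ceil(2^33 / 3), so (x * m) >> 33 equals x / 3 for every uint32 x.
    uint32_t DivBy3(uint32_t x) {
      return static_cast<uint32_t>((static_cast<uint64_t>(x) * 0xAAAAAAABull) >> 33);
    }

    // e.g. DivBy3(4294967295u) == 1431655765u
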
- Register left_reg = eax;
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->left());
+ Register divisor = ToRegister(instr->right());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(eax));
+ ASSERT(remainder.is(edx));
+ ASSERT(result.is(eax));
+ ASSERT(!divisor.is(eax));
+ ASSERT(!divisor.is(edx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ test(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ test(right_reg, ToOperand(right));
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ test(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ test(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmp(dividend, kMinInt);
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmp(divisor, -1);
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to edx.
+ // Sign extend to edx (= remainder).
__ cdq();
- __ idiv(right_reg);
+ __ idiv(divisor);
- if (instr->is_flooring()) {
+ if (hdiv->IsMathFloorOfDiv()) {
Label done;
- __ test(edx, edx);
+ __ test(remainder, remainder);
__ j(zero, &done, Label::kNear);
- __ xor_(edx, right_reg);
- __ sar(edx, 31);
- __ add(eax, edx);
+ __ xor_(remainder, divisor);
+ __ sar(remainder, 31);
+ __ add(result, remainder);
__ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
+ __ test(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
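
The xor/sar/add tail of DoDivI turns the truncating idiv into a flooring division: when the remainder is non-zero and its sign differs from the divisor's, the xor is negative, the arithmetic shift turns it into -1, and the add decrements the quotient. A small sketch of the same adjustment (illustrative name; assumes arithmetic right shift on signed values, and the zero-divisor and kMinInt/-1 deopts are assumed away):

    #include <cstdint>

    int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
      int32_t quotient = dividend / divisor;   // idiv produces both of these
      int32_t remainder = dividend % divisor;  // in a single instruction
      if (remainder != 0) {
        quotient += (remainder ^ divisor) >> 31;  // -1 iff the signs differ
      }
      return quotient;
    }
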
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- __ Move(result, dividend);
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sar(dividend, shift);
return;
+ }
- case -1:
- __ Move(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ neg(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
DeoptimizeIf(overflow, instr->environment());
+ } else {
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ __ mov(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
}
+ }
+ __ bind(&not_kmin_int);
+ __ sar(dividend, shift);
+ __ bind(&done);
+}
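
For DoFlooringDivByPowerOf2I the arithmetic shift already floors when the divisor is positive; a negative divisor negates the dividend first and special-cases INT32_MIN, which cannot be negated. A sketch (the -0 deopt and the divisor == -1 overflow deopt are left out):

    #include <cstdint>

    int32_t FlooringDivByPowerOf2(int32_t dividend, int32_t divisor, int shift) {
      // shift == log2(|divisor|); divisor == -1 is assumed to have deoptimized.
      if (divisor > 0) return dividend >> shift;               // sar floors directly
      if (dividend == INT32_MIN) return INT32_MIN / divisor;   // precomputed constant
      return (-dividend) >> shift;                             // neg + sar
    }
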
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(edx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- // Input[dividend] is clobbered.
- // The sequence is tedious because neg(dividend) might overflow.
- __ mov(result, dividend);
- __ sar(dividend, 31);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ shl(dividend, 32 - power);
- __ sar(result, power);
- __ not_(dividend);
- // Clear result.sign if dividend.sign is set.
- __ and_(result, dividend);
- } else {
- __ Move(result, dividend);
- __ sar(result, power);
- }
- } else {
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
- Register scratch = ToRegister(instr->temp());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - std::floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- __ mov(scratch, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- __ mov(edx, static_cast<int32_t>(multiplier));
- __ imul(edx);
- if (static_cast<int32_t>(multiplier) < 0) {
- __ add(edx, scratch);
- }
- Register reg_lo = eax;
- Register reg_byte_scratch = scratch;
- if (!reg_byte_scratch.is_byte_register()) {
- __ xchg(reg_lo, reg_byte_scratch);
- reg_lo = scratch;
- reg_byte_scratch = eax;
- }
- if (divisor < 0) {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0x40000000);
- __ setcc(above, reg_byte_scratch);
- __ neg(edx);
- __ sub(edx, reg_byte_scratch);
- } else {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0xC0000000);
- __ setcc(above_equal, reg_byte_scratch);
- __ add(edx, reg_byte_scratch);
- }
- __ sar(edx, shift - 32);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ test(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
}
+
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ return;
+ }
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ neg(edx);
+ __ dec(edx);
+ __ bind(&done);
}
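
DoFlooringDivByConstI reduces flooring to truncating division: if dividend and divisor cannot have opposite signs the truncated quotient is already the floor; otherwise the dividend is nudged one step toward the divisor's sign, divided, and the quotient decremented. A sketch with '/' standing in for TruncatingDiv (power-of-two magnitudes, including ±1, are assumed to take the power-of-two path above, and the -0 deopt is omitted):

    #include <cstdint>

    int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
      bool same_sign = (dividend >= 0) == (divisor > 0);
      if (same_sign || dividend == 0) return dividend / divisor;  // trunc == floor
      int32_t nudged = dividend + (divisor > 0 ? 1 : -1);
      return nudged / divisor - 1;
    }
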
@@ -1894,12 +1926,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantS(LConstantS* instr) {
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+ __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
@@ -1926,22 +1958,22 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope2(masm(), SSE4_1);
if (lower != 0) {
- __ Set(temp, Immediate(lower));
+ __ Move(temp, Immediate(lower));
__ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
} else {
__ xorps(res, res);
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
}
} else {
- __ Set(temp, Immediate(upper));
+ __ Move(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
XMMRegister xmm_scratch = double_scratch0();
- __ Set(temp, Immediate(lower));
+ __ Move(temp, Immediate(lower));
__ movd(xmm_scratch, Operand(temp));
__ orps(res, xmm_scratch);
}
@@ -2622,8 +2654,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
@@ -3409,7 +3441,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3707,7 +3739,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3831,7 +3863,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -3923,8 +3955,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3940,7 +3972,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
@@ -3948,8 +3980,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
+ __ cmp(output_reg, 0x1);
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3988,9 +4020,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -4004,9 +4036,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x80000000u);
+ __ cmp(output_reg, 0x1);
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -4025,7 +4057,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
- __ Set(output_reg, Immediate(0));
+ __ Move(output_reg, Immediate(0));
__ bind(&done);
}
@@ -4138,6 +4170,21 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsr(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Move(result, Immediate(63)); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
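
DoMathClz32 leans on bsr, which yields the index of the highest set bit, so xor-ing with 31 gives the leading-zero count; zero input is special-cased with 63 because 63 ^ 31 == 32. A portable sketch with a loop standing in for the bsr instruction:

    #include <cstdint>

    int32_t Clz32(uint32_t x) {
      if (x == 0) return 63 ^ 31;   // == 32; bsr leaves its output undefined for 0
      int32_t index = 31;
      while (!(x & 0x80000000u)) {  // find the highest set bit, as bsr would
        x <<= 1;
        --index;
      }
      return index ^ 31;            // 31 - index for index in [0, 31]
    }
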
+
+
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
@@ -4189,10 +4236,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(ebx, Immediate(undefined_value));
+ __ mov(ebx, isolate()->factory()->undefined_value());
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ Set(eax, Immediate(instr->arity()));
+ __ Move(eax, Immediate(instr->arity()));
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4202,8 +4248,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- __ Set(eax, Immediate(instr->arity()));
- __ mov(ebx, factory()->undefined_value());
+ __ Move(eax, Immediate(instr->arity()));
+ __ mov(ebx, isolate()->factory()->undefined_value());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -4291,18 +4337,17 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_fields && representation.IsSmi()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsSmi(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -4310,6 +4355,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -4347,10 +4395,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -4398,8 +4442,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));
__ mov(ecx, instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4504,7 +4547,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4650,7 +4693,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4743,7 +4786,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ push(string);
@@ -4759,7 +4802,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
instr, instr->context());
__ AssertSmi(eax);
__ SmiUntag(eax);
@@ -4792,7 +4835,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
+ __ Move(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
@@ -4809,7 +4852,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(0));
+ __ Move(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
@@ -4848,16 +4891,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- __ SmiTag(input);
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4877,17 +4910,6 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- Register input = ToRegister(instr->value());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ SmiTag(input);
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
@@ -4896,7 +4918,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+ NULL, SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4923,7 +4946,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
const X87Stack& x87_stack)
: LDeferredCode(codegen, x87_stack), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+ instr_->temp2(), UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4943,19 +4967,16 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register reg = ToRegister(value);
- Register tmp = reg.is(eax) ? ecx : eax;
+ Register tmp = ToRegister(temp1);
XMMRegister xmm_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
-
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4973,8 +4994,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm_scratch, reg,
- ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
+ __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
// int manually.
@@ -4993,21 +5013,26 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ Move(reg, Immediate(0));
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(eax)) __ mov(reg, eax);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, eax);
+ }
// Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
@@ -5018,7 +5043,6 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -5070,16 +5094,16 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
+ __ Move(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, eax);
@@ -5087,10 +5111,18 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
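
The reworked DoSmiTag handles both signedness cases: a uint32 fits in a smi only if its top two bits are clear (the 0xc0000000 test), while a possibly-overflowing int32 is shifted and the overflow flag checked. A sketch of the same range rules (returns false where the code deoptimizes; the name is illustrative):

    #include <cstdint>

    bool SmiTagValue(int32_t value, bool value_is_uint32, int32_t* tagged) {
      if (value_is_uint32) {
        // A uint32 fits only when value < 2^30.
        if (static_cast<uint32_t>(value) & 0xC0000000u) return false;
      } else if (value < -(1 << 30) || value > (1 << 30) - 1) {
        // Signed case: the emitted shl relies on the overflow flag instead of
        // an explicit range check.
        return false;
      }
      *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return true;
    }
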
@@ -5243,6 +5275,10 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
+ // The input was optimistically untagged; revert it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
if (instr->truncating()) {
Label no_heap_number, check_bools, check_false;
@@ -5258,21 +5294,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
// for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
__ j(not_equal, &check_bools, Label::kNear);
- __ Set(input_reg, Immediate(0));
+ __ Move(input_reg, Immediate(0));
__ jmp(done);
__ bind(&check_bools);
__ cmp(input_reg, factory()->true_value());
__ j(not_equal, &check_false, Label::kNear);
- __ Set(input_reg, Immediate(1));
+ __ Move(input_reg, Immediate(1));
__ jmp(done);
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ Set(input_reg, Immediate(0));
- __ jmp(done);
+ __ Move(input_reg, Immediate(0));
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
@@ -5312,9 +5347,13 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
} else {
DeferredTaggedToI* deferred =
new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
-
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiUntag(input_reg);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ j(carry, deferred->entry());
__ bind(deferred->exit());
}
}
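
The optimistic untagging above exploits the tag layout: smis end in a 0 bit and heap pointers in a 1 bit, so SmiUntag's right shift both removes the tag and pushes the tag bit into the carry flag, which routes heap objects to the deferred path; that path restores the pointer with lea(reg, [reg*2 + kHeapObjectTag]). A sketch of the idea (struct and names are illustrative):

    #include <cstdint>

    struct UntagResult { int32_t value; bool was_heap_object; };

    UntagResult OptimisticSmiUntag(uint32_t tagged) {
      UntagResult r;
      r.was_heap_object = (tagged & 1) != 0;        // the bit sar shifts into CF
      r.value = static_cast<int32_t>(tagged) >> 1;  // SmiUntag
      // The deferred path undoes this with (value << 1) | 1, recovering the pointer.
      return r;
    }
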
@@ -5746,6 +5785,45 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ pextrd(result_reg, value_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ pshufd(xmm_scratch, value_reg, 1);
+ __ movd(result_reg, xmm_scratch);
+ }
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ CpuFeatureScope scope(masm(), SSE2);
+
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ __ movd(result_reg, lo_reg);
+ __ pinsrd(result_reg, hi_reg, 1);
+ } else {
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+ }
+}
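
DoDoubleBits and DoConstructDouble just move 32-bit halves of a double's bit pattern in and out of general registers, using pextrd/pinsrd when SSE4.1 is available and shuffle/shift/or sequences otherwise. Expressed portably, the two operations amount to:

    #include <cstdint>
    #include <cstring>

    uint32_t DoubleHighBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);  // sign and exponent half
    }

    double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }
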
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5820,7 +5898,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5848,7 +5926,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(flags)));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5881,7 +5959,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -5893,7 +5971,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
@@ -5918,7 +5996,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5927,7 +6005,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ push(Immediate(instr->hydrogen()->shared_info()));
__ push(Immediate(pretenure ? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -6072,7 +6150,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6107,7 +6185,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -6148,10 +6226,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index fa5e88b03..079595cba 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -148,9 +148,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -177,9 +179,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index d621bd261..01821d95f 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -309,7 +309,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -342,7 +342,7 @@ void LGapResolver::EmitMove(int index) {
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToImmediate(constant_source, r));
+ __ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index a9d49205c..696c6be6e 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -1005,30 +1005,22 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LInstruction* goto_instr = CheckElideControlInstruction(instr);
if (goto_instr != NULL) return goto_instr;
- ToBooleanStub::Types expected = instr->expected_input_types();
-
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
HValue* value = instr->value();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
- return new(zone()) LBranch(UseRegister(value), NULL);
- }
-
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
- // The Generic stub does not have a deopt, so we need no environment.
- if (expected.IsGeneric()) {
- return new(zone()) LBranch(UseRegister(value), temp);
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
-
- // We need a temporary register when we have to access the map *or* we have
- // no type info yet, in which case we handle all cases (including the ones
- // involving maps).
- return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
+ return branch;
}
@@ -1195,6 +1187,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1220,8 +1213,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context()); // Deferred use.
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1233,6 +1230,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1324,24 +1328,72 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), eax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1350,78 +1402,114 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ edx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, eax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- // use dividend as temp if divisor < 0 && divisor != -1
- LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
- UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // needs edx:eax, plus a temp
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* temp = TempRegister();
- LInstruction* result = DefineFixed(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoDivI(instr);
}
}
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(eax);
+ LOperand* temp2 = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), eax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(edx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), edx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
-
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- return AssignEnvironment(DefineSameAsFirst(mod));
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, eax),
- UseRegister(right),
- FixedTemp(edx));
- LInstruction* result = DefineFixed(mod, edx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1809,8 +1897,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LNumberUntagD(value, temp));
+ if (!instr->value()->representation().IsSmi()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1829,8 +1921,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* xmm_temp =
(CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
? FixedTemp(xmm1) : NULL;
- LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* result = DefineSameAsFirst(
+ new(zone()) LTaggedToI(UseRegister(val), xmm_temp));
+ if (!instr->value()->representation().IsSmi()) {
+ // Note: Only deopts in deferred code.
+ result = AssignEnvironment(result);
+ }
+ return result;
}
}
} else if (from.IsDouble()) {
@@ -1854,35 +1951,37 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LDoubleToI(value, temp));
+ if (!truncating) result = AssignEnvironment(result);
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
- : NULL;
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = CpuFeatures::IsSupported(SSE2) ? FixedTemp(xmm1)
+ : NULL;
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LOperand* temp = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1939,6 +2038,7 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
}
LCheckMaps* result = new(zone()) LCheckMaps(value);
if (!instr->CanOmitMapChecks()) {
+ // Note: Only deopts in deferred code.
AssignEnvironment(result);
if (instr->has_migration_target()) return AssignPointerMap(result);
}
@@ -1975,6 +2075,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2033,7 +2147,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2049,7 +2166,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2091,11 +2211,11 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
@@ -2103,15 +2223,20 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(backing_store, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UINT32_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((instr->elements_kind() == EXTERNAL_UINT32_ELEMENTS ||
+ instr->elements_kind() == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2274,7 +2399,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (instr->field_representation().IsInteger8() ||
@@ -2286,10 +2411,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2303,13 +2427,14 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- LStoreNamedField* result =
+ LInstruction* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
}
return result;
}
@@ -2341,7 +2466,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index a36cf413e..7964b7f6e 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -82,17 +82,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -105,7 +111,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -126,14 +131,16 @@ class LCodeGen;
V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -172,7 +179,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -633,6 +639,49 @@ class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -650,29 +699,52 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -682,8 +754,55 @@ class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -782,6 +901,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
@@ -1884,19 +2015,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1911,40 +2029,31 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2023,6 +2132,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2105,7 +2215,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2162,7 +2272,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2391,6 +2501,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2610,6 +2747,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index faf768e11..7847b3b39 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -214,22 +214,22 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Register result_reg) {
Label done;
Label conv_failure;
- pxor(scratch_reg, scratch_reg);
+ xorps(scratch_reg, scratch_reg);
cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmp(result_reg, Immediate(0x1));
+ j(overflow, &conv_failure, Label::kNear);
mov(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
sub(result_reg, Immediate(1));
and_(result_reg, Immediate(255));
jmp(&done, Label::kNear);
bind(&conv_failure);
- Set(result_reg, Immediate(0));
+ Move(result_reg, Immediate(0));
ucomisd(input_reg, scratch_reg);
j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
+ Move(result_reg, Immediate(255));
bind(&done);
}
@@ -256,8 +256,8 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2si(result_reg, Operand(input_reg));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
movsd(MemOperand(esp, 0), input_reg);
@@ -374,8 +374,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
CpuFeatureScope scope(this, SSE2);
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cmp(result_reg, 0x80000000u);
- j(not_equal, &done, Label::kNear);
+ cmp(result_reg, 0x1);
+ j(no_overflow, &done, Label::kNear);
// Check if the input was 0x80000000 (kMinInt).
// If not, then we got an overflow and we deoptimize.
ExternalReference min_int = ExternalReference::address_of_min_int();
@@ -715,7 +715,7 @@ void MacroAssembler::RecordWrite(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
+ Move(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
@@ -729,20 +729,6 @@ void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
}
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
@@ -750,12 +736,12 @@ bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
}
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ Move(dst, Immediate(x.x_ ^ jit_cookie()));
xor_(dst, jit_cookie());
} else {
- Set(dst, x);
+ Move(dst, x);
}
}
@@ -1037,6 +1023,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ cmp(FieldOperand(object, 0),
+ Immediate(isolate()->factory()->allocation_site_map()));
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -2244,7 +2244,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
: kDontSaveFPRegs);
@@ -2269,7 +2269,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(eax, Immediate(num_arguments));
+ Move(eax, Immediate(num_arguments));
JumpToExternalReference(ext);
}
@@ -2429,7 +2429,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -2689,41 +2689,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
@@ -2868,6 +2833,37 @@ void MacroAssembler::Move(Register dst, Register src) {
}
+void MacroAssembler::Move(Register dst, const Immediate& x) {
+ if (x.is_zero()) {
+ xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else {
+ mov(dst, x);
+ }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
+ mov(dst, x);
+}
+
+
+void MacroAssembler::Move(XMMRegister dst, double val) {
+ // TODO(titzer): recognize double constants with ExternalReferences.
+ CpuFeatureScope scope(this, SSE2);
+ uint64_t int_val = BitCast<uint64_t, double>(val);
+ if (int_val == 0) {
+ xorps(dst, dst);
+ } else {
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ push(Immediate(upper));
+ push(Immediate(lower));
+ movsd(dst, Operand(esp, 0));
+ add(esp, Immediate(kDoubleSize));
+ }
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2980,16 +2976,8 @@ void MacroAssembler::CheckStackAlignment() {
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -3002,16 +2990,15 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
+ push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
int3();
@@ -3034,9 +3021,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
int3();
@@ -3647,6 +3634,22 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
j(not_equal, &loop_again);
}
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(eax));
+ ASSERT(!dividend.is(edx));
+ MultiplierAndShift ms(divisor);
+ mov(eax, Immediate(ms.multiplier()));
+ imul(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+ if (ms.shift() > 0) sar(edx, ms.shift());
+ mov(eax, dividend);
+ shr(eax, 31);
+ add(edx, eax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 6807d082d..698c81fe8 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -262,14 +262,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalContext(Register global_context);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -295,7 +287,7 @@ class MacroAssembler: public Assembler {
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
- Set(result, Immediate(object));
+ Move(result, Immediate(object));
}
}
@@ -358,9 +350,6 @@ class MacroAssembler: public Assembler {
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
// xorps to clear the dst register before cvtsi2sd to solve this issue.
@@ -369,7 +358,7 @@ class MacroAssembler: public Assembler {
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
- void SafeSet(Register dst, const Immediate& x);
+ void SafeMove(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
// Compare object type for heap object.
@@ -557,6 +546,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -851,6 +844,13 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& x);
+ void Move(const Operand& dst, const Immediate& x);
+
+ // Move an immediate into an XMM register.
+ void Move(XMMRegister dst, double val);
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
@@ -863,6 +863,10 @@ class MacroAssembler: public Assembler {
// Insert code to verify that the x87 stack has the specified depth (0-7)
void VerifyX87StackDepth(uint32_t depth);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in edx, and eax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index d371c456c..255df3285 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -632,7 +632,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
- __ Set(eax, Immediate(FAILURE));
+ __ Move(eax, Immediate(FAILURE));
}
__ jmp(&exit_label_);
}
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index a5b93b9b2..1a745c7b7 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -283,7 +283,7 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
__ j(not_equal, miss);
// Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
@@ -306,54 +306,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -371,7 +323,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -422,13 +374,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// This function uses push() to generate smaller, faster code than
// the version above. It is an optimization that should be removed
// when api call ICs are generated in hydrogen.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
// Copy return value.
__ pop(scratch_in);
// receiver
@@ -493,7 +446,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -572,11 +525,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
@@ -667,15 +620,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ mov(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -691,15 +644,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ mov(FieldOperand(scratch1, offset), storage_reg);
} else {
__ mov(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -742,11 +695,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -793,7 +746,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
@@ -802,7 +755,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -821,7 +774,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ mov(name_reg, value_reg);
@@ -860,9 +813,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1066,15 +1016,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -1263,24 +1204,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1288,30 +1211,26 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = edx;
- Register value = eax;
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
+ FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
__ push(receiver);
- __ push(value);
+ __ push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1355,6 +1274,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -1412,16 +1345,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return eax;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { edx, ecx, ebx, edi, no_reg };
return registers;
}
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index e0f807ce4..ebe0fb9b3 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -50,12 +50,20 @@ Address IC::address() const {
// At least one break point is active perform additional test to ensure that
// break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result))) {
+ if (debug->IsDebugBreak(Assembler::target_address_at(result,
+ raw_constant_pool()))) {
// If the call site is a call to debug break then return the address in
// the original code instead of the address in the running code. This will
// cause the original code to be updated and keeps the breakpoint active in
// the running code.
- return OriginalCodeAddress();
+ Code* code = GetCode();
+ Code* original_code = GetOriginalCode();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
+ // Return the address in the original code. This is the place where
+ // the call which has been overwritten by the DebugBreakXXX resides
+ // and the place where the inline cache system should look.
+ return result + delta;
} else {
// No break point here just return the address of the call.
return result;
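For example, if the original code's instructions start at 0x1000 and the breakpointed copy's at 0x2000, delta is -0x1000, so a call site found at 0x2040 in the running code is mapped back to 0x1040 in the original code.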
@@ -66,9 +74,45 @@ Address IC::address() const {
}
-Code* IC::GetTargetAtAddress(Address address) {
+ConstantPoolArray* IC::constant_pool() const {
+ if (!FLAG_enable_ool_constant_pool) {
+ return NULL;
+ } else {
+ Handle<ConstantPoolArray> result = raw_constant_pool_;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Debug* debug = isolate()->debug();
+    // First check if any break points are active; if not, just return the
+    // original constant pool.
+ if (!debug->has_break_points()) return *result;
+
+    // At least one break point is active, perform an additional test to ensure
+    // that break point locations are updated correctly.
+ // break point locations are updated correctly.
+ Address target = Assembler::target_address_from_return_address(pc());
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(target, raw_constant_pool()))) {
+ // If the call site is a call to debug break then we want to return the
+ // constant pool for the original code instead of the breakpointed code.
+ return GetOriginalCode()->constant_pool();
+ }
+#endif
+ return *result;
+ }
+}
+
+
+ConstantPoolArray* IC::raw_constant_pool() const {
+ if (FLAG_enable_ool_constant_pool) {
+ return *raw_constant_pool_;
+ } else {
+ return NULL;
+ }
+}
+
+
+Code* IC::GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool) {
// Get the target address of the IC.
- Address target = Assembler::target_address_at(address);
+ Address target = Assembler::target_address_at(address, constant_pool);
// Convert target address to the code object. Code::GetCodeFromTargetAddress
// is safe for use during GC where the map might be marked.
Code* result = Code::GetCodeFromTargetAddress(target);
@@ -77,10 +121,12 @@ Code* IC::GetTargetAtAddress(Address address) {
}
-void IC::SetTargetAtAddress(Address address, Code* target) {
+void IC::SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
Heap* heap = target->GetHeap();
- Code* old_target = GetTargetAtAddress(address);
+ Code* old_target = GetTargetAtAddress(address, constant_pool);
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
// ICs as strict mode. The strict-ness of the IC must be preserved.
@@ -90,7 +136,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(address, target->instruction_start());
+ Assembler::set_target_address_at(
+ address, constant_pool, target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
} else {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 1e7997a80..a32717362 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -127,6 +127,11 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry =
Isolate::c_entry_fp(isolate->thread_local_top());
+ Address constant_pool = NULL;
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ entry + ExitFrameConstants::kConstantPoolOffset);
+ }
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
@@ -134,6 +139,10 @@ IC::IC(FrameDepth depth, Isolate* isolate)
// StubFailureTrampoline, we need to look one frame further down the stack to
// find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool = Memory::Address_at(
+ fp + StandardFrameConstants::kConstantPoolOffset);
+ }
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
@@ -145,18 +154,20 @@ IC::IC(FrameDepth depth, Isolate* isolate)
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
#endif
fp_ = fp;
+ if (FLAG_enable_ool_constant_pool) {
+ raw_constant_pool_ = handle(
+ ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
+ isolate);
+ }
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
state_ = target_->ic_state();
- extra_ic_state_ = target_->needs_extended_extra_ic_state(target_->kind())
- ? target_->extended_extra_ic_state()
- : target_->extra_ic_state();
+ extra_ic_state_ = target_->extra_ic_state();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() const {
- HandleScope scope(isolate());
+SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
@@ -166,21 +177,25 @@ Address IC::OriginalCodeAddress() const {
// Find the function on the stack and both the active code for the
// function and the original code.
JSFunction* function = frame->function();
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ return function->shared();
+}
+
+
+Code* IC::GetCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
Code* code = shared->code();
+ return code;
+}
+
+
+Code* IC::GetOriginalCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
ASSERT(Debug::HasDebugInfo(shared));
Code* original_code = Debug::GetDebugInfo(shared)->original_code();
ASSERT(original_code->IsCode());
- // Get the address of the call site in the active code. This is the
- // place where the call to DebugBreakXXX is and where the IC
- // normally would be.
- Address addr = Assembler::target_address_from_return_address(pc());
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- return addr + delta;
+ return original_code;
}
#endif
@@ -411,21 +426,26 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Isolate* isolate, Address address) {
- Code* target = GetTargetAtAddress(address);
+void IC::Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool) {
+ Code* target = GetTargetAtAddress(address, constant_pool);
// Don't clear debug break inline cache as it will remove the break point.
if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
+ case Code::LOAD_IC:
+ return LoadIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(isolate, address, target);
- case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
+ return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+ case Code::STORE_IC:
+ return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(isolate, address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
- case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
+ return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_IC:
+ return CompareIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_NIL_IC:
+ return CompareNilIC::Clear(address, target, constant_pool);
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
@@ -436,40 +456,56 @@ void IC::Clear(Isolate* isolate, Address address) {
}
-void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedLoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
}
-void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
+void LoadIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
Code::LOAD_IC, target->extra_ic_state());
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
-void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void StoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
Code::STORE_IC, target->extra_ic_state());
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
-void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
+void KeyedStoreIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
SetTargetAtAddress(address,
*pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ constant_pool);
}
-void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
+void CompareIC::Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
ASSERT(target->major_key() == CodeStub::CompareIC);
CompareIC::State handler_state;
Token::Value op;
@@ -477,7 +513,7 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
&handler_state, &op);
// Only clear CompareICs that can retain objects.
if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
@@ -500,31 +536,6 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
}
if (FLAG_use_ic) {
- // Use specialized code for getting the length of strings and
- // string wrapper objects. The length property of string wrapper
- // objects is read-only and therefore always returns the length of
- // the underlying string value. See ECMA-262 15.5.5.1.
- if (object->IsStringWrapper() &&
- name->Equals(isolate()->heap()->length_string())) {
- Handle<Code> stub;
- if (state() == UNINITIALIZED) {
- stub = pre_monomorphic_stub();
- } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
- StringLengthStub string_length_stub(kind());
- stub = string_length_stub.GetCode(isolate());
- } else if (state() != MEGAMORPHIC) {
- ASSERT(state() != GENERIC);
- stub = megamorphic_stub();
- }
- if (!stub.is_null()) {
- set_target(*stub);
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
- }
- // Get the string if we have a string wrapper object.
- String* string = String::cast(JSValue::cast(*object)->value());
- return Smi::FromInt(string->length());
- }
-
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_string()) &&
@@ -553,7 +564,10 @@ MaybeObject* LoadIC::Load(Handle<Object> object,
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
+ Handle<Object> result =
+ Runtime::GetElementOrCharAt(isolate(), object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
@@ -610,28 +624,33 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
TypeHandleList types;
CodeHandleList handlers;
- int number_of_valid_types;
- int handler_to_overwrite = -1;
-
target()->FindAllTypes(&types);
int number_of_types = types.length();
- number_of_valid_types = number_of_types;
+ int deprecated_types = 0;
+ int handler_to_overwrite = -1;
for (int i = 0; i < number_of_types; i++) {
Handle<HeapType> current_type = types.at(i);
- // Filter out deprecated maps to ensure their instances get migrated.
if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
- number_of_valid_types--;
- // If the receiver type is already in the polymorphic IC, this indicates
- // there was a prototoype chain failure. In that case, just overwrite the
- // handler.
+ // Filter out deprecated maps to ensure their instances get migrated.
+ ++deprecated_types;
} else if (type->IsCurrently(current_type)) {
- ASSERT(handler_to_overwrite == -1);
- number_of_valid_types--;
+ // If the receiver type is already in the polymorphic IC, this indicates
+      // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ handler_to_overwrite = i;
+ } else if (handler_to_overwrite == -1 &&
+ current_type->IsClass() &&
+ type->IsClass() &&
+ IsTransitionOfMonomorphicTarget(*current_type->AsClass(),
+ *type->AsClass())) {
handler_to_overwrite = i;
}
}
+ int number_of_valid_types =
+ number_of_types - deprecated_types - (handler_to_overwrite != -1);
+
if (number_of_valid_types >= 4) return false;
if (number_of_types == 0) return false;
if (!target()->FindHandlers(&handlers, types.length())) return false;
@@ -639,13 +658,16 @@ bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
number_of_valid_types++;
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
+ if (!type->IsCurrently(types.at(handler_to_overwrite))) {
+ types.Set(handler_to_overwrite, type);
+ }
} else {
types.Add(type);
handlers.Add(code);
}
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &types, &handlers, number_of_valid_types, name, extra_ic_state());
+ kind(), &types, &handlers, number_of_valid_types, name, extra_ic_state());
set_target(*ic);
return true;
}
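For example, if the IC already holds three cached types of which one is deprecated and one already matches the incoming receiver (so its handler slot is selected for overwrite), number_of_valid_types is 3 - 1 - 1 = 1; once the new handler is installed it is incremented to 2, still below the polymorphic limit of four.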
@@ -697,7 +719,7 @@ void IC::UpdateMonomorphicIC(Handle<HeapType> type,
Handle<String> name) {
if (!handler->is_handler()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
- name, type, handler, extra_ic_state());
+ kind(), name, type, handler, extra_ic_state());
set_target(*ic);
}
@@ -713,19 +735,18 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) {
}
-bool IC::IsTransitionOfMonomorphicTarget(Handle<HeapType> type) {
- if (!type->IsClass()) return false;
- Map* receiver_map = *type->AsClass();
- Map* current_map = target()->FindFirstMap();
- ElementsKind receiver_elements_kind = receiver_map->elements_kind();
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+ if (source_map == NULL) return true;
+ if (target_map == NULL) return false;
+ ElementsKind target_elements_kind = target_map->elements_kind();
bool more_general_transition =
IsMoreGeneralElementsKindTransition(
- current_map->elements_kind(), receiver_elements_kind);
+ source_map->elements_kind(), target_elements_kind);
Map* transitioned_map = more_general_transition
- ? current_map->LookupElementsTransitionMap(receiver_elements_kind)
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
: NULL;
- return transitioned_map == receiver_map;
+ return transitioned_map == target_map;
}
@@ -738,17 +759,7 @@ void IC::PatchCache(Handle<HeapType> type,
case MONOMORPHIC_PROTOTYPE_FAILURE:
UpdateMonomorphicIC(type, code, name);
break;
- case MONOMORPHIC: {
- // For now, call stubs are allowed to rewrite to the same stub. This
- // happens e.g., when the field does not contain a function.
- ASSERT(!target().is_identical_to(code));
- Code* old_handler = target()->FindFirstHandler();
- if (old_handler == *code && IsTransitionOfMonomorphicTarget(type)) {
- UpdateMonomorphicIC(type, code, name);
- break;
- }
- // Fall through.
- }
+ case MONOMORPHIC: // Fall through.
case POLYMORPHIC:
if (!target()->is_keyed_stub()) {
if (UpdatePolymorphicIC(type, name, code)) break;
@@ -847,8 +858,11 @@ Handle<Code> IC::ComputeHandler(LookupResult* lookup,
isolate(), *object, cache_holder));
Handle<Code> code = isolate()->stub_cache()->FindHandler(
- name, handle(stub_holder->map()), kind(), cache_holder);
- if (!code.is_null()) return code;
+ name, handle(stub_holder->map()), kind(), cache_holder,
+ lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL);
+ if (!code.is_null()) {
+ return code;
+ }
code = CompileHandler(lookup, object, name, value, cache_holder);
ASSERT(code->is_handler());
@@ -871,6 +885,17 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
return SimpleFieldLoad(length_index);
}
+ if (object->IsStringWrapper() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ if (kind() == Code::LOAD_IC) {
+ StringLengthStub string_length_stub;
+ return string_length_stub.GetCode(isolate());
+ } else {
+ KeyedStringLengthStub string_length_stub;
+ return string_length_stub.GetCode(isolate());
+ }
+ }
+
Handle<HeapType> type = CurrentTypeOf(object, isolate());
Handle<JSObject> holder(lookup->holder());
LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
@@ -942,8 +967,8 @@ Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
if (!object->IsJSObject() &&
!function->IsBuiltin() &&
- function->shared()->is_classic_mode()) {
- // Calling non-strict non-builtins with a value as the receiver
+ function->shared()->strict_mode() == SLOPPY) {
+ // Calling sloppy non-builtins with a value as the receiver
// requires boxing.
break;
}
@@ -1063,26 +1088,25 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
MaybeObject* maybe_object = NULL;
Handle<Code> stub = generic_stub();
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
if (maybe_object->IsFailure()) return maybe_object;
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- ASSERT(!object->IsJSGlobalProxy());
if (object->IsString() && key->IsNumber()) {
if (state() == UNINITIALIZED) stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ stub = sloppy_arguments_stub();
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (!key->ToSmi()->IsFailure() &&
- (!target().is_identical_to(non_strict_arguments_stub()))) {
+ (!target().is_identical_to(sloppy_arguments_stub()))) {
stub = LoadElementStub(receiver);
}
}
@@ -1092,7 +1116,6 @@ MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
if (*stub == *generic_stub()) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
- ASSERT(!stub.is_null());
set_target(*stub);
TRACE_IC("LoadIC", key);
}
@@ -1110,22 +1133,20 @@ static bool LookupForWrite(Handle<JSObject> receiver,
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
- if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-
- if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
- receiver->LocalLookupRealNamedProperty(*name, lookup);
- return lookup->IsFound() &&
- !lookup->IsReadOnly() &&
- lookup->CanHoldValue(value) &&
- lookup->IsCacheable();
- }
- return lookup->CanHoldValue(value);
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
+ receiver->LocalLookupRealNamedProperty(*name, lookup);
+ if (!lookup->IsFound()) return false;
}
+ if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+ if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
if (lookup->IsPropertyCallbacks()) return true;
- // JSGlobalProxy always goes via the runtime, so it's safe to cache.
- if (receiver->IsJSGlobalProxy()) return true;
+ // JSGlobalProxy either stores on the global object in the prototype, or
+ // goes into the runtime if access checks are needed, so this is always
+ // safe.
+ if (receiver->IsJSGlobalProxy()) {
+ return lookup->holder() == receiver->GetPrototype();
+ }
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
// have not changed.
@@ -1183,7 +1204,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// The length property of string values is read-only. Throw in strict mode.
- if (strict_mode() == kStrictMode && object->IsString() &&
+ if (strict_mode() == STRICT && object->IsString() &&
name->Equals(isolate()->heap()->length_string())) {
return TypeError("strict_read_only_property", object, name);
}
@@ -1204,27 +1225,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
}
// Observed objects are always modified through the runtime.
- if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- Handle<Object> result = JSReceiver::SetProperty(
- receiver, name, value, NONE, strict_mode(), store_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *result;
- }
-
- // Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the length
- // property. Note that when redefined using Object.freeze, it's possible
- // to have fast properties but a read-only length.
- if (FLAG_use_ic &&
- receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string()) &&
- Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
- receiver->HasFastProperties() &&
- !receiver->map()->is_frozen()) {
- Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
- set_target(*stub);
- TRACE_IC("StoreIC", name);
+ if (receiver->map()->is_observed()) {
Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode(), store_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
@@ -1234,7 +1235,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
LookupResult lookup(isolate());
bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
- strict_mode() == kStrictMode &&
+ strict_mode() == STRICT &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
object->IsGlobalObject()) {
// Strict mode doesn't allow setting non-existent global property.
@@ -1264,7 +1265,7 @@ MaybeObject* StoreIC::Store(Handle<Object> object,
Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ExtraICState extra_state = ComputeExtraICState(strict_mode);
Handle<Code> ic = isolate->stub_cache()->ComputeStore(
UNINITIALIZED, extra_state);
@@ -1283,7 +1284,7 @@ Handle<Code> StoreIC::generic_stub() const {
Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ExtraICState state = ComputeExtraICState(strict_mode);
return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state);
}
@@ -1310,14 +1311,14 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
Handle<String> name,
Handle<Object> value,
InlineCacheHolderFlag cache_holder) {
- if (object->IsJSGlobalProxy()) return slow_stub();
+ if (object->IsAccessCheckNeeded()) return slow_stub();
ASSERT(cache_holder == OWN_MAP);
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<JSObject> holder(lookup->holder());
// Handlers do not use strict mode.
- StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
+ StoreStubCompiler compiler(isolate(), SLOPPY, kind());
switch (lookup->type()) {
case FIELD:
return compiler.CompileStoreField(receiver, lookup, name);
@@ -1334,17 +1335,19 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
}
case NORMAL:
if (kind() == Code::KEYED_STORE_IC) break;
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
+ ? handle(GlobalObject::cast(receiver->GetPrototype()))
+ : Handle<GlobalObject>::cast(receiver);
Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(union_type->IsConstant());
-
+ StoreGlobalStub stub(
+ union_type->IsConstant(), receiver->IsJSGlobalProxy());
Handle<Code> code = stub.GetCodeCopyFromTemplate(
- isolate(), receiver->map(), *cell);
+ isolate(), global, cell);
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
HeapObject::UpdateMapCodeCache(receiver, name, code);
return code;
@@ -1352,7 +1355,6 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
ASSERT(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
- if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
@@ -1380,12 +1382,23 @@ Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
ASSERT(callback->IsForeign());
+
+ // Use specialized code for setting the length of arrays with fast
+ // properties. Slow properties might indicate redefinition of the length
+ // property.
+ if (receiver->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_string()) &&
+ Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
+ receiver->HasFastProperties()) {
+ return compiler.CompileStoreArrayLength(receiver, lookup, name);
+ }
+
// No IC support for old-style native accessors.
break;
}
case INTERCEPTOR:
if (kind() == Code::KEYED_STORE_IC) break;
- ASSERT(HasInterceptorSetter(*receiver));
+ ASSERT(HasInterceptorSetter(*holder));
return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
@@ -1439,9 +1452,10 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (IsTransitionStoreMode(store_mode)) {
transitioned_receiver_map = ComputeTransitionedMap(receiver, store_mode);
}
- if (receiver_map.is_identical_to(previous_receiver_map) ||
- IsTransitionOfMonomorphicTarget(
- MapToType<HeapType>(transitioned_receiver_map, isolate()))) {
+ if ((receiver_map.is_identical_to(previous_receiver_map) &&
+ IsTransitionStoreMode(store_mode)) ||
+ IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+ *transitioned_receiver_map)) {
// If the "old" and "new" maps are in the same elements map family, or
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
@@ -1575,7 +1589,10 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
key->ToSmi()->To(&smi_key);
int index = smi_key->value();
bool oob_access = IsOutOfBoundsAccess(receiver, index);
- bool allow_growth = receiver->IsJSArray() && oob_access;
+ // Don't consider this a growing store if the store would send the receiver to
+ // dictionary mode.
+ bool allow_growth = receiver->IsJSArray() && oob_access &&
+ !receiver->WouldConvertToSlowElements(key);
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -1655,8 +1672,8 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
return *result;
}
- // Check for values that can be converted into an internalized string directly
- // or is representable as a smi.
+ // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
MaybeObject* maybe_object = NULL;
@@ -1669,8 +1686,10 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
JSReceiver::MAY_BE_STORE_FROM_KEYED);
if (maybe_object->IsFailure()) return maybe_object;
} else {
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
+ bool use_ic = FLAG_use_ic &&
+ !object->IsAccessCheckNeeded() &&
+ !object->IsJSGlobalProxy() &&
+ !(object->IsJSObject() &&
JSObject::cast(*object)->map()->is_observed());
if (use_ic && !object->IsSmi()) {
// Don't use ICs for maps of the objects in Array's prototype chain. We
@@ -1681,16 +1700,18 @@ MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- ASSERT(!object->IsJSGlobalProxy());
+ ASSERT(!object->IsAccessCheckNeeded());
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ if (strict_mode() == SLOPPY) {
+ stub = sloppy_arguments_stub();
+ }
} else if (key_is_smi_like &&
- !(target().is_identical_to(non_strict_arguments_stub()))) {
+ !(target().is_identical_to(sloppy_arguments_stub()))) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
@@ -1791,11 +1812,11 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- JSArray* receiver = JSArray::cast(args[0]);
- Object* len = args[1];
+ Handle<JSArray> receiver = args.at<JSArray>(0);
+ Handle<Object> len = args.at<Object>(1);
// The generated code should filter out non-Smis before we get here.
ASSERT(len->IsSmi());
@@ -1807,11 +1828,9 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
- Object* result;
- MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->To(&result)) return maybe_result;
-
- return len;
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSArray::SetElementsLength(receiver, len));
+ return *len;
}
@@ -1843,14 +1862,12 @@ RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
Object* to_store = value;
- if (FLAG_track_double_fields) {
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
- if (details.representation().IsDouble()) {
- MaybeObject* maybe_storage =
- isolate->heap()->AllocateHeapNumber(value->Number());
- if (!maybe_storage->To(&to_store)) return maybe_storage;
- }
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ MaybeObject* maybe_storage =
+ isolate->heap()->AllocateHeapNumber(value->Number());
+ if (!maybe_storage->To(&to_store)) return maybe_storage;
}
new_storage->set(old_storage->length(), to_store);
@@ -1894,7 +1911,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
value,
NONE,
@@ -1911,7 +1928,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
value,
NONE,
@@ -1929,7 +1946,7 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictModeFlag strict_mode = ic.strict_mode();
+ StrictMode strict_mode = ic.strict_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2352,7 +2369,7 @@ const char* BinaryOpIC::State::KindToString(Kind kind) {
Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
switch (kind) {
case NONE: return Type::None(zone);
- case SMI: return Type::Smi(zone);
+ case SMI: return Type::SignedSmall(zone);
case INT32: return Type::Signed32(zone);
case NUMBER: return Type::Number(zone);
case STRING: return Type::String(zone);
@@ -2366,7 +2383,7 @@ Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
Handle<Object> right) {
- State state(target()->extended_extra_ic_state());
+ State state(target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
@@ -2377,8 +2394,11 @@ MaybeObject* BinaryOpIC::Transition(Handle<AllocationSite> allocation_site,
isolate(), function, left, 1, &right, &caught_exception);
if (caught_exception) return Failure::Exception();
+ // Execution::Call can execute arbitrary JavaScript, hence potentially
+ // update the state of this very IC, so we must update the stored state.
+ UpdateTarget();
// Compute the new state.
- State old_state = state;
+ State old_state(target()->extra_ic_state());
state.Update(left, right, result);
// Check if we have a string operation here.
@@ -2495,7 +2515,7 @@ Type* CompareIC::StateToType(
Handle<Map> map) {
switch (state) {
case CompareIC::UNINITIALIZED: return Type::None(zone);
- case CompareIC::SMI: return Type::Smi(zone);
+ case CompareIC::SMI: return Type::SignedSmall(zone);
case CompareIC::NUMBER: return Type::Number(zone);
case CompareIC::STRING: return Type::String(zone);
case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
@@ -2680,9 +2700,11 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
}
-void CompareNilIC::Clear(Address address, Code* target) {
+void CompareNilIC::Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool) {
if (IsCleared(target)) return;
- ExtraICState state = target->extended_extra_ic_state();
+ ExtraICState state = target->extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -2690,7 +2712,7 @@ void CompareNilIC::Clear(Address address, Code* target) {
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code, target->GetIsolate()));
- SetTargetAtAddress(address, code);
+ SetTargetAtAddress(address, code, constant_pool);
}
@@ -2704,7 +2726,7 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ ExtraICState extra_ic_state = target()->extra_ic_state();
CompareNilICStub stub(extra_ic_state);
@@ -2788,7 +2810,7 @@ Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(target()->extended_extra_ic_state());
+ ToBooleanStub stub(target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 99309f4ed..e70cb82c9 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -101,7 +101,9 @@ class IC {
}
// Clear the inline cache to initial state.
- static void Clear(Isolate* isolate, Address address);
+ static void Clear(Isolate* isolate,
+ Address address,
+ ConstantPoolArray* constant_pool);
#ifdef DEBUG
bool IsLoadStub() const {
@@ -155,14 +157,17 @@ class IC {
Isolate* isolate() const { return isolate_; }
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Computes the address in the original code when the code running is
- // containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress() const;
+ // Get the shared function info of the caller.
+ SharedFunctionInfo* GetSharedFunctionInfo() const;
+ // Get the code object of the caller.
+ Code* GetCode() const;
+ // Get the original (non-breakpointed) code object of the caller.
+ Code* GetOriginalCode() const;
#endif
// Set the call-site target.
void set_target(Code* code) {
- SetTargetAtAddress(address(), code);
+ SetTargetAtAddress(address(), code, constant_pool());
target_set_ = true;
}
@@ -180,8 +185,11 @@ class IC {
Failure* ReferenceError(const char* type, Handle<String> name);
// Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address);
- static inline void SetTargetAtAddress(Address address, Code* target);
+ static inline Code* GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool);
+ static inline void SetTargetAtAddress(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -209,7 +217,7 @@ class IC {
virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionOfMonomorphicTarget(Handle<HeapType> type);
+ bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
void PatchCache(Handle<HeapType> type,
Handle<String> name,
Handle<Code> code);
@@ -239,8 +247,17 @@ class IC {
extra_ic_state_ = state;
}
+ protected:
+ void UpdateTarget() {
+ target_ = handle(raw_target(), isolate_);
+ }
+
private:
- Code* raw_target() const { return GetTargetAtAddress(address()); }
+ Code* raw_target() const {
+ return GetTargetAtAddress(address(), constant_pool());
+ }
+ inline ConstantPoolArray* constant_pool() const;
+ inline ConstantPoolArray* raw_constant_pool() const;
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -253,6 +270,10 @@ class IC {
Isolate* isolate_;
+ // The constant pool of the code which originally called the IC (which might
+ // be for the breakpointed copy of the original code).
+ Handle<ConstantPoolArray> raw_constant_pool_;
+
// The original code target that missed.
Handle<Code> target_;
State state_;
@@ -320,8 +341,7 @@ class LoadIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
@@ -374,7 +394,10 @@ class LoadIC: public IC {
Representation representation =
Representation::Tagged());
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -400,7 +423,7 @@ class KeyedLoadIC: public LoadIC {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -437,14 +460,17 @@ class KeyedLoadIC: public LoadIC {
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
}
Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -452,12 +478,11 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
- class StrictModeState: public BitField<StrictModeFlag, 1, 1> {};
- static ExtraICState ComputeExtraICState(StrictModeFlag flag) {
+ class StrictModeState: public BitField<StrictMode, 1, 1> {};
+ static ExtraICState ComputeExtraICState(StrictMode flag) {
return StrictModeState::encode(flag);
}
-
- static StrictModeFlag GetStrictMode(ExtraICState state) {
+ static StrictMode GetStrictMode(ExtraICState state) {
return StrictModeState::decode(state);
}
@@ -471,7 +496,7 @@ class StoreIC: public IC {
ASSERT(IsStoreStub());
}
- StrictModeFlag strict_mode() const {
+ StrictMode strict_mode() const {
return StrictModeState::decode(extra_ic_state());
}
@@ -482,14 +507,13 @@ class StoreIC: public IC {
GenerateMiss(masm);
}
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state);
+ static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Code> initialize_stub(Isolate* isolate,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
MUST_USE_RESULT MaybeObject* Store(
Handle<Object> object,
@@ -514,7 +538,7 @@ class StoreIC: public IC {
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -536,7 +560,10 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
friend class IC;
};
@@ -561,7 +588,7 @@ class KeyedStoreIC: public StoreIC {
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
- static ExtraICState ComputeExtraICState(StrictModeFlag flag,
+ static ExtraICState ComputeExtraICState(StrictMode flag,
KeyedAccessStoreMode mode) {
return StrictModeState::encode(flag) |
ExtraICStateKeyedAccessStoreMode::encode(mode);
@@ -589,9 +616,9 @@ class KeyedStoreIC: public StoreIC {
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
- static void GenerateNonStrictArguments(MacroAssembler* masm);
+ StrictMode strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
@@ -602,8 +629,8 @@ class KeyedStoreIC: public StoreIC {
return pre_monomorphic_stub(isolate(), strict_mode());
}
static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictModeFlag strict_mode) {
- if (strict_mode == kStrictMode) {
+ StrictMode strict_mode) {
+ if (strict_mode == STRICT) {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
} else {
return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
@@ -613,7 +640,7 @@ class KeyedStoreIC: public StoreIC {
return isolate()->builtins()->KeyedStoreIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -632,18 +659,21 @@ class KeyedStoreIC: public StoreIC {
// Stub accessors.
virtual Handle<Code> generic_stub() const {
- if (strict_mode() == kStrictMode) {
+ if (strict_mode() == STRICT) {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
} else {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
}
- Handle<Code> non_strict_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
}
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
@@ -850,7 +880,10 @@ class CompareIC: public IC {
static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Isolate* isolate, Address address, Code* target);
+ static void Clear(Isolate* isolate,
+ Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
Token::Value op_;
@@ -866,7 +899,9 @@ class CompareNilIC: public IC {
static Handle<Code> GetUninitialized();
- static void Clear(Address address, Code* target);
+ static void Clear(Address address,
+ Code* target,
+ ConstantPoolArray* constant_pool);
static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
Handle<Object> object);
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index b9bd65edc..1fff8170f 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -27,12 +27,21 @@
#include "icu_util.h"
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#if defined(_WIN32)
#include <windows.h>
+#endif
+
+#if defined(V8_I18N_SUPPORT)
+#include <stdio.h>
+#include <stdlib.h>
#include "unicode/putil.h"
#include "unicode/udata.h"
+#define ICU_UTIL_DATA_FILE 0
+#define ICU_UTIL_DATA_SHARED 1
+#define ICU_UTIL_DATA_STATIC 2
+
#define ICU_UTIL_DATA_SYMBOL "icudt" U_ICU_VERSION_SHORT "_dat"
#define ICU_UTIL_DATA_SHARED_MODULE_NAME "icudt.dll"
#endif
@@ -41,8 +50,22 @@ namespace v8 {
namespace internal {
-bool InitializeICU() {
-#if defined(_WIN32) && defined(V8_I18N_SUPPORT)
+#if defined(V8_I18N_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
+namespace {
+char* g_icu_data_ptr = NULL;
+
+void free_icu_data_ptr() {
+ delete[] g_icu_data_ptr;
+}
+
+} // namespace
+#endif
+
+bool InitializeICU(const char* icu_data_file) {
+#if !defined(V8_I18N_SUPPORT)
+ return true;
+#else
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED
// We expect to find the ICU data module alongside the current module.
HMODULE module = LoadLibraryA(ICU_UTIL_DATA_SHARED_MODULE_NAME);
if (!module) return false;
@@ -53,9 +76,36 @@ bool InitializeICU() {
UErrorCode err = U_ZERO_ERROR;
udata_setCommonData(reinterpret_cast<void*>(addr), &err);
return err == U_ZERO_ERROR;
-#else
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_STATIC
// Mac/Linux bundle the ICU data in.
return true;
+#elif ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+ if (!icu_data_file) return false;
+
+ if (g_icu_data_ptr) return true;
+
+ FILE* inf = fopen(icu_data_file, "rb");
+ if (!inf) return false;
+
+ fseek(inf, 0, SEEK_END);
+ size_t size = ftell(inf);
+ rewind(inf);
+
+ g_icu_data_ptr = new char[size];
+ if (fread(g_icu_data_ptr, 1, size, inf) != size) {
+ delete[] g_icu_data_ptr;
+ g_icu_data_ptr = NULL;
+ fclose(inf);
+ return false;
+ }
+ fclose(inf);
+
+ atexit(free_icu_data_ptr);
+
+ UErrorCode err = U_ZERO_ERROR;
+ udata_setCommonData(reinterpret_cast<void*>(g_icu_data_ptr), &err);
+ return err == U_ZERO_ERROR;
+#endif
#endif
}
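The new ICU_UTIL_DATA_FILE branch above reads the whole .dat file into a heap buffer once per process and hands it to udata_setCommonData. A minimal standalone sketch of the same read-the-entire-file pattern, using only plain C++ stdio (the helper name and error handling are illustrative, not V8's):

#include <cstdio>
#include <cstddef>

// Read an entire binary file into a caller-owned heap buffer.
// Returns NULL on failure; on success *out_size holds the byte count.
static char* ReadEntireFile(const char* path, size_t* out_size) {
  FILE* f = std::fopen(path, "rb");
  if (f == NULL) return NULL;
  std::fseek(f, 0, SEEK_END);
  long size = std::ftell(f);
  std::rewind(f);
  if (size < 0) { std::fclose(f); return NULL; }
  char* data = new char[static_cast<size_t>(size)];
  size_t read = std::fread(data, 1, static_cast<size_t>(size), f);
  std::fclose(f);
  if (read != static_cast<size_t>(size)) {
    delete[] data;
    return NULL;
  }
  *out_size = static_cast<size_t>(size);
  return data;
}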
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/icu_util.h
index 478abce50..6b50c185c 100644
--- a/deps/v8/src/icu_util.h
+++ b/deps/v8/src/icu_util.h
@@ -35,7 +35,7 @@ namespace internal {
// Call this function to load ICU's data tables for the current process. This
// function should be called before ICU is used.
-bool InitializeICU();
+bool InitializeICU(const char* icu_data_file);
} } // namespace v8::internal
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index 1b9a28a5b..bbe0c51a5 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -83,28 +83,6 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(!marking->is_compacting_);
-
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- int counter = chunk->write_barrier_counter();
- if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
- marking->write_barriers_invoked_since_last_step_ +=
- MemoryChunk::kWriteBarrierCounterGranularity -
- chunk->write_barrier_counter();
- chunk->set_write_barrier_counter(
- MemoryChunk::kWriteBarrierCounterGranularity);
- }
-
- marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate) {
- ASSERT(obj->IsHeapObject());
- IncrementalMarking* marking = isolate->heap()->incremental_marking();
- ASSERT(marking->is_compacting_);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
int counter = chunk->write_barrier_counter();
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index d47c300ef..f4362ff5d 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -100,7 +100,7 @@ class IncrementalMarking {
// Do some marking every time this much memory has been allocated or that many
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
- static const intptr_t kWriteBarriersInvokedThreshold = 65536;
+ static const intptr_t kWriteBarriersInvokedThreshold = 32768;
// Start off by marking this many times more memory than has been allocated.
static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
@@ -129,10 +129,6 @@ class IncrementalMarking {
Object** slot,
Isolate* isolate);
- static void RecordWriteForEvacuationFromCode(HeapObject* obj,
- Object** slot,
- Isolate* isolate);
-
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
//
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc
index 2fc9fd302..de54d0c42 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/interpreter-irregexp.cc
@@ -158,25 +158,12 @@ static int32_t Load16Aligned(const byte* pc) {
// matching terminates.
class BacktrackStack {
public:
- explicit BacktrackStack(Isolate* isolate) : isolate_(isolate) {
- if (isolate->irregexp_interpreter_backtrack_stack_cache() != NULL) {
- // If the cache is not empty reuse the previously allocated stack.
- data_ = isolate->irregexp_interpreter_backtrack_stack_cache();
- isolate->set_irregexp_interpreter_backtrack_stack_cache(NULL);
- } else {
- // Cache was empty. Allocate a new backtrack stack.
- data_ = NewArray<int>(kBacktrackStackSize);
- }
+ explicit BacktrackStack() {
+ data_ = NewArray<int>(kBacktrackStackSize);
}
~BacktrackStack() {
- if (isolate_->irregexp_interpreter_backtrack_stack_cache() == NULL) {
- // The cache is empty. Keep this backtrack stack around.
- isolate_->set_irregexp_interpreter_backtrack_stack_cache(data_);
- } else {
- // A backtrack stack was already cached, just release this one.
- DeleteArray(data_);
- }
+ DeleteArray(data_);
}
int* data() const { return data_; }
@@ -187,7 +174,6 @@ class BacktrackStack {
static const int kBacktrackStackSize = 10000;
int* data_;
- Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
@@ -204,7 +190,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
// BacktrackStack ensures that the memory allocated for the backtracking stack
// is returned to the system or cached if there is no stack being cached at
// the moment.
- BacktrackStack backtrack_stack(isolate);
+ BacktrackStack backtrack_stack;
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
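
Note: with the per-isolate backtrack-stack cache removed above, each interpreter run simply owns and frees its own buffer. An illustrative, self-contained sketch of that ownership model follows; it is not V8 code, and BacktrackStackDemo is a made-up stand-in.

// Demo: the stack owns a freshly allocated buffer and releases it on
// destruction; there is no cache to hand the buffer back to.
class BacktrackStackDemo {
 public:
  BacktrackStackDemo() : data_(new int[kBacktrackStackSize]) {}
  ~BacktrackStackDemo() { delete[] data_; }
  int* data() const { return data_; }
  int max_size() const { return kBacktrackStackSize; }
 private:
  static const int kBacktrackStackSize = 10000;
  int* data_;
  // Copying would double-free data_ (V8 uses DISALLOW_COPY_AND_ASSIGN here).
  BacktrackStackDemo(const BacktrackStackDemo&);
  void operator=(const BacktrackStackDemo&);
};

int main() {
  BacktrackStackDemo stack;   // allocated here ...
  stack.data()[0] = 0;
  return 0;                   // ... and released when it goes out of scope
}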
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 8a2f4219c..7e06a2ed5 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -80,10 +80,6 @@ int ThreadId::GetCurrentThreadId() {
ThreadLocalTop::ThreadLocalTop() {
InitializeInternal();
- // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
- // before an isolate is initialized. The initialize methods below do
- // not touch it to preserve its value.
- ignore_out_of_memory_ = false;
}
@@ -453,10 +449,10 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
// If the caller parameter is a function we skip frames until we're
// under it before starting to collect.
bool seen_caller = !caller->IsJSFunction();
- // First element is reserved to store the number of non-strict frames.
+ // First element is reserved to store the number of sloppy frames.
int cursor = 1;
int frames_seen = 0;
- int non_strict_frames = 0;
+ int sloppy_frames = 0;
bool encountered_strict_function = false;
for (StackFrameIterator iter(this);
!iter.done() && frames_seen < limit;
@@ -487,13 +483,13 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict
- // mode function. The number of non-strict frames is stored as
+ // mode function. The number of sloppy frames is stored as
// first element in the result array.
if (!encountered_strict_function) {
- if (!fun->shared()->is_classic_mode()) {
+ if (fun->shared()->strict_mode() == STRICT) {
encountered_strict_function = true;
} else {
- non_strict_frames++;
+ sloppy_frames++;
}
}
elements->set(cursor++, *recv);
@@ -503,7 +499,7 @@ Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
}
}
}
- elements->set(0, Smi::FromInt(non_strict_frames));
+ elements->set(0, Smi::FromInt(sloppy_frames));
Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
return result;
@@ -778,7 +774,7 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
DisallowHeapAllocation no_gc;
@@ -829,7 +825,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
bool Isolate::MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
ASSERT(context());
@@ -946,10 +942,17 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
+ if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
return Throw(heap_.illegal_access_string());
}
+Failure* Isolate::ThrowInvalidStringLength() {
+ return Throw(*factory()->NewRangeError(
+ "invalid_string_length", HandleVector<Object>(NULL, 0)));
+}
+
+
void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
@@ -1122,8 +1125,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// while the bootstrapper is active since the infrastructure may not have
// been properly initialized.
if (!bootstrapping) {
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
@@ -1163,7 +1164,6 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
"uncaught_exception",
location,
HandleVector<Object>(&exception_arg, 1),
- stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
if (location != NULL) {
@@ -1269,14 +1269,8 @@ void Isolate::ReportPendingMessages() {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the native context. Note: We have to mark the native context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
HandleScope scope(this);
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top_.pending_exception_ ==
+ if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
// Do nothing: if needed, the exception has been already propagated to
// v8::TryCatch.
@@ -1307,8 +1301,7 @@ void Isolate::ReportPendingMessages() {
MessageLocation Isolate::GetMessageLocation() {
ASSERT(has_pending_exception());
- if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
- thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
thread_local_top_.has_pending_message_ &&
!thread_local_top_.pending_message_obj_->IsTheHole() &&
!thread_local_top_.pending_message_obj_->IsTheHole()) {
@@ -1327,39 +1320,36 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // Always reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
+ bool is_termination_exception =
+ pending_exception() == heap_.termination_exception();
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it(this);
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
+ if (is_termination_exception) {
+ if (is_bottom_call) {
thread_local_top()->external_caught_exception_ = false;
clear_pending_exception();
return false;
}
+ } else if (thread_local_top()->external_caught_exception_) {
+ // If the exception is externally caught, clear it if there are no
+ // JavaScript frames on the way to the C++ frame that has the
+ // external handler.
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ JavaScriptFrameIterator it(this);
+ if (it.done() || (it.frame()->sp() > external_handler_address)) {
+ clear_exception = true;
+ }
+ }
+
+ // Clear the exception if needed.
+ if (clear_exception) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
}
// Reschedule the exception.
@@ -1379,23 +1369,6 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
Handle<Context> Isolate::native_context() {
return Handle<Context>(context()->global_object()->native_context());
}
@@ -1465,6 +1438,13 @@ Isolate::ThreadDataTable::~ThreadDataTable() {
}
+Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
+#if defined(USE_SIMULATOR)
+ delete simulator_;
+#endif
+}
+
+
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
@@ -1545,7 +1525,6 @@ Isolate::Isolate()
global_handles_(NULL),
eternal_handles_(NULL),
thread_manager_(NULL),
- fp_stubs_generated_(false),
has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
@@ -1565,8 +1544,8 @@ Isolate::Isolate()
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
num_sweeper_threads_(0),
- max_available_threads_(0),
- stress_deopt_count_(0) {
+ stress_deopt_count_(0),
+ next_optimization_id_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1581,18 +1560,9 @@ Isolate::Isolate()
thread_manager_ = new ThreadManager();
thread_manager_->isolate_ = this;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- simulator_initialized_ = false;
- simulator_i_cache_ = NULL;
- simulator_redirection_ = NULL;
-#endif
-
#ifdef DEBUG
// heap_histograms_ initializes itself.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
- memset(code_kind_statistics_, 0,
- sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1672,6 +1642,10 @@ void Isolate::Deinit() {
delete[] sweeper_thread_;
sweeper_thread_ = NULL;
+ if (FLAG_job_based_sweeping &&
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
+ }
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1846,9 +1820,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
if (!external_caught) return;
- if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
- // Do not propagate OOM exception: we should kill VM asap.
- } else if (thread_local_top_.pending_exception_ ==
+ if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
@@ -1919,7 +1891,7 @@ bool Isolate::Init(Deserializer* des) {
}
// The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure;
+ DisallowAllocationFailure disallow_allocation_failure(this);
InitializeLoggingAndCounters();
@@ -1967,7 +1939,7 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
Simulator::Initialize(this);
#endif
#endif
@@ -2005,6 +1977,12 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
+ if (FLAG_log_internal_timer_events) {
+ set_event_logger(Logger::LogInternalEvents);
+ } else {
+ set_event_logger(Logger::EmptyLogInternalEvents);
+ }
+
// Set default value if not yet set.
// TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
// once ResourceConstraints becomes an argument to the Isolate constructor.
@@ -2013,7 +1991,10 @@ bool Isolate::Init(Deserializer* des) {
max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
}
- num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
+ if (!FLAG_job_based_sweeping) {
+ num_sweeper_threads_ =
+ SweeperThread::NumberOfThreads(max_available_threads_);
+ }
if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
@@ -2099,17 +2080,14 @@ bool Isolate::Init(Deserializer* des) {
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
- // TODO(mstarzinger): The following is an ugly hack to make sure the
- // interface descriptor is initialized even when stubs have been
- // deserialized out of the snapshot without the graph builder.
- FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE, 0);
- stub.InitializeInterfaceDescriptor(
- this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ // Ensure interface descriptors are initialized even when stubs have been
+ // deserialized out of the snapshot without using the graph builder.
+ FastCloneShallowArrayStub::InstallDescriptors(this);
BinaryOpICStub::InstallDescriptors(this);
BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
- CompareNilICStub::InitializeForIsolate(this);
- ToBooleanStub::InitializeForIsolate(this);
+ CompareNilICStub::InstallDescriptors(this);
+ ToBooleanStub::InstallDescriptors(this);
+ ToNumberStub::InstallDescriptors(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
@@ -2318,4 +2296,25 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
+
+Handle<JSObject> Isolate::GetSymbolRegistry() {
+ if (heap()->symbol_registry()->IsUndefined()) {
+ Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
+ heap()->set_symbol_registry(*registry);
+
+ static const char* nested[] = {
+ "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(nested); ++i) {
+ Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
+ Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
+ JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
+ JSObject::SetProperty(registry, name, obj, NONE, STRICT);
+ }
+ }
+ return Handle<JSObject>::cast(factory()->symbol_registry());
+}
+
+
} } // namespace v8::internal
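
Note: GetSymbolRegistry() above lazily creates the registry and its fixed set of nested tables on first use, then always returns the cached object. An illustrative, self-contained sketch of the same pattern, not V8 code; RegistryDemo and std::map stand in for V8's JSObject.

#include <cstddef>
#include <map>
#include <string>

struct RegistryDemo {
  std::map<std::string, std::map<std::string, int> > nested;
};

static RegistryDemo* GetSymbolRegistryDemo() {
  static RegistryDemo* registry = NULL;   // "undefined" until the first call
  if (registry == NULL) {
    registry = new RegistryDemo();
    static const char* nested[] = {
      "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
    };
    for (unsigned i = 0; i < sizeof(nested) / sizeof(nested[0]); ++i) {
      registry->nested[nested[i]];        // create each empty sub-table once
    }
  }
  return registry;
}

int main() {
  // Repeated calls return the same lazily built registry.
  return GetSymbolRegistryDemo() == GetSymbolRegistryDemo() ? 0 : 1;
}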
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index d93a86229..b4713786a 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -102,6 +102,7 @@ class DebuggerAgent;
#endif
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
class Redirection;
class Simulator;
@@ -145,7 +146,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
do { \
ASSERT(!(isolate)->has_pending_exception()); \
CHECK(!(call).is_null()); \
- CHECK(!(isolate)->has_pending_exception()); \
} while (false)
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
@@ -207,6 +207,11 @@ class ThreadId {
};
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
+ inline type name() const { return name##_; }
+
+
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
@@ -233,14 +238,7 @@ class ThreadLocalTop BASE_EMBEDDED {
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
- }
-
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
+ FIELD_ACCESSOR(Address, try_catch_handler_address)
void Free() {
ASSERT(!has_pending_message_);
@@ -290,9 +288,6 @@ class ThreadLocalTop BASE_EMBEDDED {
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
- // Whether out of memory exceptions should be ignored.
- bool ignore_out_of_memory_;
-
private:
void InitializeInternal();
@@ -310,11 +305,28 @@ class ThreadLocalTop BASE_EMBEDDED {
#endif
+
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__)
+
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(HashMap*, simulator_i_cache, NULL) \
+ V(Redirection*, simulator_redirection, NULL)
+#else
+
+#define ISOLATE_INIT_SIMULATOR_LIST(V)
+
+#endif
+
+
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
+ CommentStatistic::kMaxComments + 1) \
+ V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -341,31 +353,39 @@ typedef List<HeapObject*> DebugObjectCache;
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
+ V(LogEventCallback, event_logger, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, microtask_pending, false) \
+ V(bool, microtask_pending, false) \
+ V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
V(CodeTracer*, code_tracer, NULL) \
+ V(bool, fp_stubs_generated, false) \
+ V(int, max_available_threads, 0) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ ISOLATE_INIT_SIMULATOR_LIST(V) \
ISOLATE_DEBUGGER_INIT_LIST(V)
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
+ inline type name() const { return thread_local_top_.name##_; }
+
+
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
@@ -385,24 +405,23 @@ class Isolate {
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
+ ~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
+
+ FIELD_ACCESSOR(uintptr_t, stack_limit)
+ FIELD_ACCESSOR(ThreadState*, thread_state)
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
+ FIELD_ACCESSOR(Simulator*, simulator)
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -416,6 +435,7 @@ class Isolate {
ThreadState* thread_state_;
#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
!defined(__mips__) && V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
@@ -541,38 +561,35 @@ class Isolate {
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() { return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
// Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
- }
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
+
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
+
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
+
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
+
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
+
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
@@ -587,12 +604,8 @@ class Isolate {
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
+
+ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
@@ -625,8 +638,7 @@ class Isolate {
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
- return (!exception->IsOutOfMemory()) &&
- (exception != heap()->termination_exception());
+ return exception != heap()->termination_exception();
}
// Serializer.
@@ -705,16 +717,6 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
-
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
@@ -747,6 +749,10 @@ class Isolate {
v8::AccessType type) {
return MayIndexedAccess(*receiver, index, type);
}
+ void ReportFailedAccessCheckWrapper(Handle<JSObject> receiver,
+ v8::AccessType type) {
+ ReportFailedAccessCheck(*receiver, type);
+ }
bool MayNamedAccess(JSObject* receiver,
Object* key,
@@ -773,6 +779,7 @@ class Isolate {
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
+ Failure* ThrowInvalidStringLength();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Failure* PromoteScheduledException();
@@ -938,12 +945,6 @@ class Isolate {
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -989,48 +990,15 @@ class Isolate {
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-#endif
-
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
- ExternalCallbackScope* external_callback_scope() {
- return thread_local_top_.external_callback_scope_;
- }
- void set_external_callback_scope(ExternalCallbackScope* scope) {
- thread_local_top_.external_callback_scope_ = scope;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
- }
-
- void set_current_vm_state(StateTag state) {
- thread_local_top_.current_vm_state_ = state;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
void SetData(uint32_t slot, void* data) {
ASSERT(slot < Internals::kNumIsolateDataSlots);
@@ -1041,12 +1009,7 @@ class Isolate {
return embedder_data_[slot];
}
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
- }
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
@@ -1096,14 +1059,6 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
- int max_available_threads() const {
- return max_available_threads_;
- }
-
- void set_max_available_threads(int value) {
- max_available_threads_ = value;
- }
-
bool concurrent_recompilation_enabled() {
// Thread is only available with flag enabled.
ASSERT(optimizing_compiler_thread_ == NULL ||
@@ -1153,6 +1108,17 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ int NextOptimizationId() {
+ int id = next_optimization_id_++;
+ if (!Smi::IsValid(next_optimization_id_)) {
+ next_optimization_id_ = 0;
+ }
+ return id;
+ }
+
+ // Get (and lazily initialize) the registry for per-isolate symbols.
+ Handle<JSObject> GetSymbolRegistry();
+
private:
Isolate();
@@ -1299,7 +1265,6 @@ class Isolate {
EternalHandles* eternal_handles_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
- bool fp_stubs_generated_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
@@ -1329,18 +1294,10 @@ class Isolate {
// Time stamp at initialization.
double time_millis_at_init_;
-#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
- V8_TARGET_ARCH_MIPS && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1377,13 +1334,11 @@ class Isolate {
SweeperThread** sweeper_thread_;
int num_sweeper_threads_;
- // TODO(yangguo): This will become obsolete once ResourceConstraints
- // becomes an argument to Isolate constructor.
- int max_available_threads_;
-
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ int next_optimization_id_;
+
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
@@ -1403,6 +1358,10 @@ class Isolate {
};
+#undef FIELD_ACCESSOR
+#undef THREAD_LOCAL_TOP_ACCESSOR
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1509,17 +1468,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-// Tells whether the native context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return native_context()->out_of_memory()->IsTrue();
-}
-
-
-// Mark the native context with out of memory.
-inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
-}
-
class CodeTracer V8_FINAL : public Malloced {
public:
explicit CodeTracer(int isolate_id)
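
Note: the isolate.h changes above replace hand-written getter/setter pairs with the FIELD_ACCESSOR and THREAD_LOCAL_TOP_ACCESSOR macros. An illustrative, self-contained sketch of what such a macro expands to follows; it is not V8 code, and FIELD_ACCESSOR_DEMO and ThreadSlotDemo are made-up names.

// Demo: a setter/getter pair generated from a field name, replacing the
// hand-written accessors deleted throughout isolate.h above.
#define FIELD_ACCESSOR_DEMO(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }       \
  inline type name() const { return name##_; }

class ThreadSlotDemo {
 public:
  FIELD_ACCESSOR_DEMO(int, stack_limit)   // yields set_stack_limit() and stack_limit()
 private:
  int stack_limit_;
};

int main() {
  ThreadSlotDemo t;
  t.set_stack_limit(4096);
  return t.stack_limit() == 4096 ? 0 : 1;
}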
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 72c69100d..4c2b47918 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -361,7 +361,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<Object> value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+ JSObject::SetOwnElement(json_object, index, value, SLOPPY);
continue;
}
// Not an index, fallback to the slow path.
@@ -414,9 +414,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
if (value->FitsRepresentation(expected_representation)) {
// If the target representation is double and the value is already
// double, use the existing box.
- if (FLAG_track_double_fields &&
- value->IsSmi() &&
- expected_representation.IsDouble()) {
+ if (value->IsSmi() && expected_representation.IsDouble()) {
value = factory()->NewHeapNumber(
Handle<Smi>::cast(value)->value());
}
@@ -608,6 +606,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
Handle<StringType> seq_string =
NewRawString<StringType>(factory(), length, pretenure_);
+ ASSERT(!seq_string.is_null());
// Copy prefix into seq_str.
SinkChar* dest = seq_string->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
@@ -795,6 +794,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
} while (c0_ != '"');
int length = position_ - beg_pos;
Handle<String> result = factory()->NewRawOneByteString(length, pretenure_);
+ ASSERT(!result.is_null());
uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 4510c4b45..3926969f6 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -51,6 +51,8 @@ class BasicJsonStringifier BASE_EMBEDDED {
enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
+ void Accumulate();
+
void Extend();
void ChangeEncoding();
@@ -178,6 +180,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
int current_index_;
int part_length_;
bool is_ascii_;
+ bool overflowed_;
static const int kJsonEscapeTableEntrySize = 8;
static const char* const JsonEscapeTable;
@@ -254,12 +257,16 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), current_index_(0), is_ascii_(true) {
+ : isolate_(isolate),
+ current_index_(0),
+ is_ascii_(true),
+ overflowed_(false) {
factory_ = isolate_->factory();
accumulator_store_ = Handle<JSValue>::cast(
factory_->ToObject(factory_->empty_string()));
part_length_ = kInitialPartLength;
current_part_ = factory_->NewRawOneByteString(part_length_);
+ ASSERT(!current_part_.is_null());
tojson_string_ = factory_->toJSON_string();
stack_ = factory_->NewJSArray(8);
}
@@ -269,9 +276,12 @@ MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
switch (SerializeObject(object)) {
case UNCHANGED:
return isolate_->heap()->undefined_value();
- case SUCCESS:
+ case SUCCESS: {
ShrinkCurrentPart();
- return *factory_->NewConsString(accumulator(), current_part_);
+ Accumulate();
+ if (overflowed_) return isolate_->ThrowInvalidStringLength();
+ return *accumulator();
+ }
case CIRCULAR:
return isolate_->Throw(*factory_->NewTypeError(
"circular_structure", HandleVector<Object>(NULL, 0)));
@@ -300,6 +310,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
if (object->IsOneByteRepresentationUnderneath()) {
Handle<String> result =
isolate->factory()->NewRawOneByteString(worst_case_length);
+ ASSERT(!result.is_null());
DisallowHeapAllocation no_gc;
return StringifyString_<SeqOneByteString>(
isolate,
@@ -308,6 +319,7 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
} else {
Handle<String> result =
isolate->factory()->NewRawTwoByteString(worst_case_length);
+ ASSERT(!result.is_null());
DisallowHeapAllocation no_gc;
return StringifyString_<SeqTwoByteString>(
isolate,
@@ -381,13 +393,16 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
if (check.HasOverflowed()) return STACK_OVERFLOW;
int length = Smi::cast(stack_->length())->value();
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- return CIRCULAR;
+ {
+ DisallowHeapAllocation no_allocation;
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ return CIRCULAR;
+ }
}
}
- stack_->EnsureSize(length + 1);
+ JSArray::EnsureSize(stack_, length + 1);
FixedArray::cast(stack_->elements())->set(length, *object);
stack_->set_length(Smi::FromInt(length + 1));
return SUCCESS;
@@ -486,7 +501,9 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
// Attach result string to the accumulator.
- set_accumulator(factory_->NewConsString(accumulator(), result_string));
+ Handle<String> cons = factory_->NewConsString(accumulator(), result_string);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, cons, EXCEPTION);
+ set_accumulator(cons);
return SUCCESS;
}
@@ -655,7 +672,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
isolate_);
} else {
property = GetProperty(isolate_, object, key);
- if (property.is_null()) return EXCEPTION;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION);
}
Result result = SerializeProperty(property, comma, key);
if (!comma && result == SUCCESS) comma = true;
@@ -687,7 +704,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
property = GetProperty(isolate_, object, key_handle);
}
}
- if (property.is_null()) return EXCEPTION;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, property, EXCEPTION);
Result result = SerializeProperty(property, comma, key_handle);
if (!comma && result == SUCCESS) comma = true;
if (result >= EXCEPTION) return result;
@@ -708,8 +725,19 @@ void BasicJsonStringifier::ShrinkCurrentPart() {
}
+void BasicJsonStringifier::Accumulate() {
+ if (accumulator()->length() + current_part_->length() > String::kMaxLength) {
+ // Screw it. Simply set the flag and carry on. Throw exception at the end.
+ set_accumulator(factory_->empty_string());
+ overflowed_ = true;
+ } else {
+ set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ }
+}
+
+
void BasicJsonStringifier::Extend() {
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ Accumulate();
if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
part_length_ *= kPartLengthGrowthFactor;
}
@@ -718,14 +746,16 @@ void BasicJsonStringifier::Extend() {
} else {
current_part_ = factory_->NewRawTwoByteString(part_length_);
}
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
}
void BasicJsonStringifier::ChangeEncoding() {
ShrinkCurrentPart();
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
+ Accumulate();
current_part_ = factory_->NewRawTwoByteString(part_length_);
+ ASSERT(!current_part_.is_null());
current_index_ = 0;
is_ascii_ = false;
}
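
Note: Accumulate() above caps the result length, records the overflow in a flag, and throws a single range error at the end rather than mid-serialization. An illustrative, self-contained sketch of that strategy follows; it is not V8 code, and AccumulatorDemo with its tiny kMaxLength limit is a made-up stand-in for String::kMaxLength.

#include <string>

class AccumulatorDemo {
 public:
  AccumulatorDemo() : overflowed_(false) {}
  void Accumulate(const std::string& part) {
    if (accumulator_.size() + part.size() > kMaxLength) {
      accumulator_.clear();   // keep serializing; report the error once at the end
      overflowed_ = true;
    } else {
      accumulator_ += part;
    }
  }
  bool overflowed() const { return overflowed_; }
 private:
  static const size_t kMaxLength = 64;   // stand-in for String::kMaxLength
  std::string accumulator_;
  bool overflowed_;
};

int main() {
  AccumulatorDemo acc;
  for (int i = 0; i < 10; i++) acc.Accumulate(std::string(16, 'x'));
  return acc.overflowed() ? 0 : 1;   // overflow detected -> caller would throw
}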
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index c21e6351d..fc4b58dec 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -210,6 +210,28 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
+ if (IS_ARRAY(replacer)) {
+ // Deduplicate replacer array items.
+ var property_list = new InternalArray();
+ var seen_properties = { __proto__: null };
+ var seen_sentinel = {};
+ var length = replacer.length;
+ for (var i = 0; i < length; i++) {
+ var item = replacer[i];
+ if (IS_STRING_WRAPPER(item)) {
+ item = ToString(item);
+ } else {
+ if (IS_NUMBER_WRAPPER(item)) item = ToNumber(item);
+ if (IS_NUMBER(item)) item = %_NumberToString(item);
+ }
+ if (IS_STRING(item) && seen_properties[item] != seen_sentinel) {
+ property_list.push(item);
+ // We cannot use true here because __proto__ needs to be an object.
+ seen_properties[item] = seen_sentinel;
+ }
+ }
+ replacer = property_list;
+ }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
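
Note: the json.js change above deduplicates an array replacer, keeping the first occurrence of each property name in order. An illustrative, self-contained C++ sketch of the same deduplication, not V8 code; Deduplicate is a made-up helper.

#include <set>
#include <string>
#include <vector>

static std::vector<std::string> Deduplicate(const std::vector<std::string>& in) {
  std::vector<std::string> property_list;
  std::set<std::string> seen_properties;
  for (size_t i = 0; i < in.size(); ++i) {
    if (seen_properties.insert(in[i]).second) {   // first time this key is seen
      property_list.push_back(in[i]);
    }
  }
  return property_list;
}

int main() {
  std::vector<std::string> keys;
  keys.push_back("a"); keys.push_back("b"); keys.push_back("a");
  return Deduplicate(keys).size() == 2 ? 0 : 1;
}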
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index edd2eacd3..a30fc26ff 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -49,6 +49,8 @@
#include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -464,6 +466,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
// Unable to compile regexp.
Handle<String> error_message =
isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
+ ASSERT(!error_message.is_null());
CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
return false;
}
@@ -688,7 +691,8 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
int32_t* match) {
ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
- last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+ JSArray::EnsureSize(last_match_info,
+ capture_register_count + kLastMatchOverhead);
DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
@@ -3597,9 +3601,12 @@ class AlternativeGenerationList {
// The '2' variant has an inclusive from and an exclusive to.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
- 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
- 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which include WhiteSpace (7.2) or LineTerminator (7.3) values.
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
+ 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
+ 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
+ 0xFEFF, 0xFF00, 0x10000 };
static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
static const int kWordRanges[] = {
@@ -6085,9 +6092,14 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#elif V8_TARGET_ARCH_ARM64
+ RegExpMacroAssemblerARM64 macro_assembler(mode, (data->capture_count + 1) * 2,
+ zone);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
zone);
+#else
+#error "Unsupported architecture"
#endif
#else // V8_INTERPRETED_REGEXP
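
Note: tables such as kSpaceRanges above store consecutive "inclusive from, exclusive to" pairs. An illustrative, self-contained sketch of how such a table is queried follows; it is not V8 code, and InRangeTable with its shortened table is made up.

// Demo: consecutive entries form half-open [from, to) intervals.
static bool InRangeTable(const int* ranges, int count, int c) {
  for (int i = 0; i + 1 < count; i += 2) {
    if (c >= ranges[i] && c < ranges[i + 1]) return true;
  }
  return false;
}

int main() {
  static const int kSpacesDemo[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1 };
  return (InRangeTable(kSpacesDemo, 6, ' ') &&
          !InRangeTable(kSpacesDemo, 6, 'a')) ? 0 : 1;
}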
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 877b3a63e..5c4883234 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -50,6 +50,8 @@ class DefaultPlatform : public Platform {
void SetThreadPoolSize(int thread_pool_size);
+ void EnsureInitialized();
+
// v8::Platform implementation.
virtual void CallOnBackgroundThread(
Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
@@ -59,8 +61,6 @@ class DefaultPlatform : public Platform {
private:
static const int kMaxThreadPoolSize = 4;
- void EnsureInitialized();
-
Mutex lock_;
bool initialized_;
int thread_pool_size_;
diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h
index deee98877..7c0cba7fb 100644
--- a/deps/v8/src/lithium-allocator-inl.h
+++ b/deps/v8/src/lithium-allocator-inl.h
@@ -34,6 +34,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 48fa862c9..9987161d4 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -35,6 +35,8 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 9908ea823..8a1476a04 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -47,16 +47,12 @@ class HValue;
class BitVector;
class StringStream;
-class LArgument;
class LPlatformChunk;
class LOperand;
class LUnallocated;
-class LConstantOperand;
class LGap;
class LParallelMove;
class LPointerMap;
-class LStackSlot;
-class LRegister;
// This class represents a single point of a LOperand's lifetime.
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 2d71d13c6..be0ff8371 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -38,6 +38,9 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
@@ -104,11 +107,9 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (value->position() != RelocInfo::kNoPosition) {
- ASSERT(!graph()->info()->IsOptimizing() ||
- !FLAG_emit_opt_code_positions ||
- value->position() != RelocInfo::kNoPosition);
- RecordAndWritePosition(value->position());
+ if (!value->position().IsUnknown()) {
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
}
instr->CompileToNative(codegen);
@@ -141,13 +142,15 @@ void LCodeGenBase::Comment(const char* format, ...) {
int LCodeGenBase::GetNextEmittedBlock() const {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!graph()->blocks()->at(i)->IsReachable()) continue;
if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
-void LCodeGenBase::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+ ASSERT(code->is_optimized_code());
ZoneList<Handle<Map> > maps(1, zone());
ZoneList<Handle<JSObject> > objects(1, zone());
ZoneList<Handle<Cell> > cells(1, zone());
@@ -156,11 +159,11 @@ void LCodeGenBase::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::CELL &&
- Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_cell())) {
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
Handle<Cell> cell(it.rinfo()->target_cell());
cells.Add(cell, zone());
} else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
if (it.rinfo()->target_object()->IsMap()) {
Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index f6806781d..3e8d471ea 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -66,7 +66,7 @@ class LCodeGenBase BASE_EMBEDDED {
int GetNextEmittedBlock() const;
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
protected:
enum Status {
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index b4f96290c..8753ff14a 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -41,6 +41,9 @@
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/lithium-arm64.h"
+#include "arm64/lithium-codegen-arm64.h"
#else
#error "Unknown architecture."
#endif
@@ -108,39 +111,40 @@ void LOperand::PrintTo(StringStream* stream) {
case DOUBLE_REGISTER:
stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
}
}
-#define DEFINE_OPERAND_CACHE(name, type) \
- L##name* L##name::cache = NULL; \
- \
- void L##name::SetUpCache() { \
- if (cache) return; \
- cache = new L##name[kNumCachedOperands]; \
- for (int i = 0; i < kNumCachedOperands; i++) { \
- cache[i].ConvertTo(type, i); \
- } \
- } \
- \
- void L##name::TearDownCache() { \
- delete[] cache; \
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+ if (cache) return;
+ cache = new LSubKindOperand[kNumCachedOperands];
+ for (int i = 0; i < kNumCachedOperands; i++) {
+ cache[i].ConvertTo(kOperandKind, i);
}
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+ delete[] cache;
+}
-LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
-#undef DEFINE_OPERAND_CACHE
void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}
void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
@@ -442,6 +446,7 @@ Handle<Code> LChunk::Codegen() {
CodeGenerator::PrintCode(code, info());
return code;
}
+ assembler.AbortedCodeGeneration();
return Handle<Code>::null();
}
@@ -495,10 +500,9 @@ LEnvironment* LChunkBuilderBase::CreateEnvironment(
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
+ CHECK(!value->IsPushArgument()); // Do not deopt outgoing arguments
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
op = LEnvironment::materialization_marker();
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 754f88da8..8ae5b879d 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -35,12 +35,12 @@
namespace v8 {
namespace internal {
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
+#define LITHIUM_OPERAND_LIST(V) \
+ V(ConstantOperand, CONSTANT_OPERAND, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Register, REGISTER, 16) \
+ V(DoubleRegister, DOUBLE_REGISTER, 16)
class LOperand : public ZoneObject {
@@ -52,20 +52,18 @@ class LOperand : public ZoneObject {
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
+ DOUBLE_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type) \
+#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
bool Is##name() const { return kind() == type; }
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
- LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
- LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
- LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
+ LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
bool Equals(LOperand* other) const { return value_ == other->value_; }
@@ -317,140 +315,35 @@ class LMoveOperands V8_FINAL BASE_EMBEDDED {
};
-class LConstantOperand V8_FINAL : public LOperand {
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+class LSubKindOperand V8_FINAL : public LOperand {
public:
- static LConstantOperand* Create(int index, Zone* zone) {
+ static LSubKindOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LConstantOperand(index);
+ return new(zone) LSubKindOperand(index);
}
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
+ static LSubKindOperand* cast(LOperand* op) {
+ ASSERT(op->kind() == kOperandKind);
+ return reinterpret_cast<LSubKindOperand*>(op);
}
static void SetUpCache();
static void TearDownCache();
private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand* cache;
+ static LSubKindOperand* cache;
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+ LSubKindOperand() : LOperand() { }
+ explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
};
-class LArgument V8_FINAL : public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot V8_FINAL : public LOperand {
- public:
- static LStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot* cache;
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot V8_FINAL : public LOperand {
- public:
- static LDoubleStackSlot* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot* cache;
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister V8_FINAL : public LOperand {
- public:
- static LRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister* cache;
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister V8_FINAL : public LOperand {
- public:
- static LDoubleRegister* Create(int index, Zone* zone) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new(zone) LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister* cache;
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
+#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+typedef LSubKindOperand<LOperand::type, number> L##name;
+LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
class LParallelMove V8_FINAL : public ZoneObject {
@@ -679,7 +572,7 @@ class ShallowIterator V8_FINAL BASE_EMBEDDED {
private:
bool ShouldSkip(LOperand* op) {
- return op == NULL || op->IsConstantOperand() || op->IsArgument();
+ return op == NULL || op->IsConstantOperand();
}
// Skip until something interesting, beginning with and including current_.
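
Note: lithium.h above folds five nearly identical operand classes into one LSubKindOperand template plus per-kind typedefs. An illustrative, self-contained sketch of that consolidation follows; it is not V8 code, the *Demo names are made up, and Create() fills the cached slot inline where V8 instead pre-fills the cache once in SetUpCache().

// Demo: one class template parameterized by operand kind and cache size,
// with low indices served from a per-kind static cache.
template <int kOperandKind, int kNumCachedOperands>
class SubKindOperandDemo {
 public:
  static SubKindOperandDemo* Create(int index) {
    if (index < kNumCachedOperands) {
      cache[index].index_ = index;     // V8 pre-fills this in SetUpCache()
      return &cache[index];
    }
    return new SubKindOperandDemo(index);
  }
  int kind() const { return kOperandKind; }
  int index() const { return index_; }
 private:
  explicit SubKindOperandDemo(int index) : index_(index) {}
  SubKindOperandDemo() : index_(0) {}
  static SubKindOperandDemo cache[kNumCachedOperands];
  int index_;
};

template <int kOperandKind, int kNumCachedOperands>
SubKindOperandDemo<kOperandKind, kNumCachedOperands>
    SubKindOperandDemo<kOperandKind, kNumCachedOperands>::cache[kNumCachedOperands];

typedef SubKindOperandDemo<1, 128> StackSlotDemo;   // cache sizes mirror the new list
typedef SubKindOperandDemo<4, 16>  RegisterDemo;

int main() {
  return (StackSlotDemo::Create(3)->index() == 3 &&
          RegisterDemo::Create(200)->index() == 200) ? 0 : 1;
}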
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 002e06243..5eae1073a 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -49,14 +49,14 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
-void SetElementNonStrict(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value) {
+void SetElementSloppy(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value) {
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
Handle<Object> no_failure =
- JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
+ JSObject::SetElement(object, index, value, NONE, SLOPPY);
ASSERT(!no_failure.is_null());
USE(no_failure);
}
@@ -359,17 +359,17 @@ class CompareOutputArrayWriter {
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
Isolate* isolate = array_->GetIsolate();
- SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1), isolate));
- SetElementNonStrict(array_,
- current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
- isolate));
- SetElementNonStrict(array_,
- current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
- isolate));
+ SetElementSloppy(array_,
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1), isolate));
+ SetElementSloppy(array_,
+ current_size_ + 1,
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
+ isolate));
+ SetElementSloppy(array_,
+ current_size_ + 2,
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
+ isolate));
current_size_ += 3;
}
@@ -662,20 +662,20 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- SetElementNonStrict(array_, field_position, value);
+ SetElementSloppy(array_, field_position, value);
}
void SetSmiValueField(int field_position, int value) {
- SetElementNonStrict(array_,
- field_position,
- Handle<Smi>(Smi::FromInt(value), isolate()));
+ SetElementSloppy(array_,
+ field_position,
+ Handle<Smi>(Smi::FromInt(value), isolate()));
}
- Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(isolate(), field_position);
+ Handle<Object> GetField(int field_position) {
+ return Object::GetElementNoExceptionThrown(
+ isolate(), array_, field_position);
}
int GetSmiValueField(int field_position) {
- Object* res = GetField(field_position);
- CHECK(res->IsSmi());
- return Smi::cast(res)->value();
+ Handle<Object> res = GetField(field_position);
+ return Handle<Smi>::cast(res)->value();
}
private:
@@ -724,17 +724,15 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
return this->GetSmiValueField(kParentIndexOffset_);
}
Handle<Code> GetFunctionCode() {
- Object* element = this->GetField(kCodeOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> element = this->GetField(kCodeOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
CHECK(raw_result->IsCode());
return Handle<Code>::cast(raw_result);
}
Handle<Object> GetCodeScopeInfo() {
- Object* element = this->GetField(kCodeScopeInfoOffset_);
- CHECK(element->IsJSValue());
- return UnwrapJSValue(Handle<JSValue>(JSValue::cast(element)));
+ Handle<Object> element = this->GetField(kCodeScopeInfoOffset_);
+ return UnwrapJSValue(Handle<JSValue>::cast(element));
}
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
@@ -767,8 +765,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
static bool IsInstance(Handle<JSArray> array) {
return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(
- array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
+ Object::GetElementNoExceptionThrown(
+ array->GetIsolate(), array, kSharedInfoOffset_)->IsJSValue();
}
explicit SharedInfoWrapper(Handle<JSArray> array)
@@ -785,9 +783,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
this->SetSmiValueField(kEndPositionOffset_, end_position);
}
Handle<SharedFunctionInfo> GetInfo() {
- Object* element = this->GetField(kSharedInfoOffset_);
- CHECK(element->IsJSValue());
- Handle<JSValue> value_wrapper(JSValue::cast(element));
+ Handle<Object> element = this->GetField(kSharedInfoOffset_);
+ Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
return UnwrapSharedFunctionInfoFromJSValue(value_wrapper);
}
@@ -818,7 +815,7 @@ class FunctionInfoListener {
fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
- SetElementNonStrict(result_, len_, info.GetJSArray());
+ SetElementSloppy(result_, len_, info.GetJSArray());
len_++;
}
@@ -826,8 +823,8 @@ class FunctionInfoListener {
HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
@@ -836,8 +833,8 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
info.SetFunctionCode(function_code,
Handle<HeapObject>(isolate()->heap()->null_value()));
}
@@ -851,8 +848,8 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(
- isolate(), current_parent_index_));
+ *Object::GetElementNoExceptionThrown(
+ isolate(), result_, current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
@@ -885,20 +882,20 @@ class FunctionInfoListener {
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- context_list[i]->name());
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ context_list[i]->name());
scope_info_length++;
- SetElementNonStrict(
+ SetElementSloppy(
scope_info_list,
scope_info_length,
Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
scope_info_length++;
}
- SetElementNonStrict(scope_info_list,
- scope_info_length,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
+ SetElementSloppy(scope_info_list,
+ scope_info_length,
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
scope_info_length++;
current_scope = current_scope->outer_scope();
@@ -959,11 +956,11 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
JSReceiver::SetProperty(
- rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ rethrow_exception, start_pos_key, start_pos, NONE, SLOPPY);
JSReceiver::SetProperty(
- rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ rethrow_exception, end_pos_key, end_pos, NONE, SLOPPY);
JSReceiver::SetProperty(
- rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ rethrow_exception, script_obj_key, script_obj, NONE, SLOPPY);
}
}
@@ -987,12 +984,12 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
SharedFunctionInfo::cast(
- array->GetElementNoExceptionThrown(isolate, i)));
+ *Object::GetElementNoExceptionThrown(isolate, array, i)));
SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- SetElementNonStrict(array, i, info_wrapper.GetJSArray());
+ SetElementSloppy(array, i, info_wrapper.GetJSArray());
}
}
@@ -1361,23 +1358,24 @@ static int TranslatePosition(int original_position,
Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element =
- position_change_array->GetElementNoExceptionThrown(isolate, i);
+ HandleScope scope(isolate);
+ Handle<Object> element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i);
CHECK(element->IsSmi());
- int chunk_start = Smi::cast(element)->value();
+ int chunk_start = Handle<Smi>::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 1);
+ element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i + 1);
CHECK(element->IsSmi());
- int chunk_end = Smi::cast(element)->value();
+ int chunk_end = Handle<Smi>::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(isolate,
- i + 2);
+ element = Object::GetElementNoExceptionThrown(
+ isolate, position_change_array, i + 2);
CHECK(element->IsSmi());
- int chunk_changed_end = Smi::cast(element)->value();
+ int chunk_changed_end = Handle<Smi>::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
}
@@ -1472,7 +1470,6 @@ static Handle<Code> PatchPositionsInCode(
code->instruction_start());
{
- DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
@@ -1557,7 +1554,6 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_name(original->name());
copy->set_line_offset(original->line_offset());
copy->set_column_offset(original->column_offset());
- copy->set_data(original->data());
copy->set_type(original->type());
copy->set_context_data(original->context_data());
copy->set_eval_from_shared(original->eval_from_shared());
@@ -1632,16 +1628,15 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element =
- shared_info_array->GetElementNoExceptionThrown(isolate, i);
- CHECK(element->IsJSValue());
- Handle<JSValue> jsvalue(JSValue::cast(element));
+ HandleScope scope(isolate);
+ Handle<Object> element =
+ Object::GetElementNoExceptionThrown(isolate, shared_info_array, i);
+ Handle<JSValue> jsvalue = Handle<JSValue>::cast(element);
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
- isolate));
+ SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
return true;
}
}
@@ -1951,11 +1946,12 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(result->GetIsolate(), i) ==
- Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+ Handle<Object> obj =
+ Object::GetElementNoExceptionThrown(isolate, result, i);
+ if (*obj == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
- SetElementNonStrict(result, i, replaced);
+ SetElementSloppy(result, i, replaced);
}
}
return NULL;
@@ -1996,7 +1992,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// Fill the default values.
for (int i = 0; i < len; i++) {
- SetElementNonStrict(
+ SetElementSloppy(
result,
i,
Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
@@ -2017,9 +2013,9 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
- Vector<const char> vector_message(error_message, StrLength(error_message));
- Handle<String> str = isolate->factory()->NewStringFromAscii(vector_message);
- SetElementNonStrict(result, len, str);
+ Handle<String> str = isolate->factory()->NewStringFromAscii(
+ CStrVector(error_message));
+ SetElementSloppy(result, len, str);
}
return result;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 1c332d173..942170c28 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1124,8 +1124,14 @@ void Logger::LeaveExternal(Isolate* isolate) {
}
+void Logger::LogInternalEvents(const char* name, int se) {
+ Isolate* isolate = Isolate::Current();
+ LOG(isolate, TimerEvent(static_cast<StartEnd>(se), name));
+}
+
+
void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
- LOG(isolate_, TimerEvent(se, name_));
+ isolate_->event_logger()(name_, se);
}
@@ -1192,37 +1198,33 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
void Logger::LogRuntime(Vector<const char> format,
- JSArray* args) {
+ Handle<JSArray> args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope(isolate_);
Log::MessageBuilder msg(log_);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
if (c == '%' && i <= format.length() - 2) {
i++;
ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
- Object* obj;
- if (!maybe->ToObject(&obj)) {
- msg.Append("<exception>");
- continue;
- }
+ // No exception expected when getting an element from an array literal.
+ Handle<Object> obj =
+ Object::GetElementNoExceptionThrown(isolate_, args, format[i] - '0');
i++;
switch (format[i]) {
case 's':
- msg.AppendDetailed(String::cast(obj), false);
+ msg.AppendDetailed(String::cast(*obj), false);
break;
case 'S':
- msg.AppendDetailed(String::cast(obj), true);
+ msg.AppendDetailed(String::cast(*obj), true);
break;
case 'r':
- Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
+ Logger::LogRegExpSource(Handle<JSRegExp>::cast(obj));
break;
case 'x':
- msg.Append("0x%x", Smi::cast(obj)->value());
+ msg.Append("0x%x", Smi::cast(*obj)->value());
break;
case 'i':
- msg.Append("%i", Smi::cast(obj)->value());
+ msg.Append("%i", Smi::cast(*obj)->value());
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index d4dc76a21..c01aca273 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -316,15 +316,18 @@ class Logger {
static void EnterExternal(Isolate* isolate);
static void LeaveExternal(Isolate* isolate);
+ static void EmptyLogInternalEvents(const char* name, int se) { }
+ static void LogInternalEvents(const char* name, int se);
+
class TimerEventScope {
public:
TimerEventScope(Isolate* isolate, const char* name)
: isolate_(isolate), name_(name) {
- if (FLAG_log_internal_timer_events) LogTimerEvent(START);
+ LogTimerEvent(START);
}
~TimerEventScope() {
- if (FLAG_log_internal_timer_events) LogTimerEvent(END);
+ LogTimerEvent(END);
}
void LogTimerEvent(StartEnd se);
@@ -346,7 +349,7 @@ class Logger {
void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Log an event reported from generated code
- void LogRuntime(Vector<const char> format, JSArray* args);
+ void LogRuntime(Vector<const char> format, Handle<JSArray> args);
bool is_logging() {
return is_logging_;
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 9fdf2ee7d..b05868c01 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -72,6 +72,14 @@ const int kInvalidProtoDepth = -1;
#include "x64/assembler-x64-inl.h"
#include "code.h" // must be after assembler_*.h
#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
+#include "assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/assembler-arm64-inl.h"
+#include "code.h" // must be after assembler_*.h
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
@@ -116,6 +124,7 @@ class FrameScope {
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 1722c6c7d..0b69e6b80 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -162,6 +162,7 @@ macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
# Private names.
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (sym in obj);
diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h
index 321309c60..a42e0f7f1 100644
--- a/deps/v8/src/mark-compact-inl.h
+++ b/deps/v8/src/mark-compact-inl.h
@@ -81,14 +81,15 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
Object** slot,
- Object* object) {
+ Object* object,
+ SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
object_page->slots_buffer_address(),
slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ mode)) {
EvictEvacuationCandidate(object_page);
}
}
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index f38fa5ef1..f04a8bcb9 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -67,6 +67,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
compacting_(false),
was_marked_incrementally_(false),
sweeping_pending_(false),
+ pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
@@ -91,8 +92,7 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
- rinfo->target_object())) {
+ if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
VisitPointer(&p);
}
@@ -101,7 +101,7 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitCell(RelocInfo* rinfo) {
Code* code = rinfo->host();
ASSERT(rinfo->rmode() == RelocInfo::CELL);
- if (!Code::IsWeakEmbeddedObject(code->kind(), rinfo->target_cell())) {
+ if (!code->IsWeakObject(rinfo->target_cell())) {
ObjectVisitor::VisitCell(rinfo);
}
}
@@ -227,6 +227,10 @@ static void VerifyEvacuation(NewSpace* space) {
static void VerifyEvacuation(PagedSpace* space) {
+ // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
+ // swept pages.
+ if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
+ space->was_swept_conservatively()) return;
PageIterator it(space);
while (it.has_next()) {
@@ -569,6 +573,27 @@ void MarkCompactCollector::ClearMarkbits() {
}
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+ SweeperTask(Heap* heap, PagedSpace* space)
+ : heap_(heap), space_(space) {}
+
+ virtual ~SweeperTask() {}
+
+ private:
+ // v8::Task overrides.
+ virtual void Run() V8_OVERRIDE {
+ heap_->mark_compact_collector()->SweepInParallel(space_);
+ heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ PagedSpace* space_;
+
+ DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+
void MarkCompactCollector::StartSweeperThreads() {
// TODO(hpayer): This check is just used for debugging purpose and
// should be removed or turned into an assert after investigating the
@@ -579,6 +604,14 @@ void MarkCompactCollector::StartSweeperThreads() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
+ if (FLAG_job_based_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_data_space()),
+ v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->old_pointer_space()),
+ v8::Platform::kShortRunningTask);
+ }
}
@@ -587,6 +620,12 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
+ if (FLAG_job_based_sweeping) {
+ // Wait twice for both jobs.
+ pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
+ }
+ ParallelSweepSpacesComplete();
sweeping_pending_ = false;
RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
@@ -616,7 +655,7 @@ intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
bool MarkCompactCollector::AreSweeperThreadsActivated() {
- return isolate()->sweeper_threads() != NULL;
+ return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}
@@ -625,15 +664,17 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
}
-bool Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
+ if (!heap_->incremental_marking()->IsMarking()) return;
+
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return false;
+ if (old_start == new_start) return;
MarkBit new_mark_bit = MarkBitFrom(new_start);
MarkBit old_mark_bit = MarkBitFrom(old_start);
@@ -646,9 +687,8 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
old_mark_bit.Clear();
ASSERT(IsWhite(old_mark_bit));
Marking::MarkBlack(new_mark_bit);
- return true;
+ return;
} else if (Marking::IsGrey(old_mark_bit)) {
- ASSERT(heap_->incremental_marking()->IsMarking());
old_mark_bit.Clear();
old_mark_bit.Next().Clear();
ASSERT(IsWhite(old_mark_bit));
@@ -661,8 +701,6 @@ bool Marking::TransferMark(Address old_start, Address new_start) {
ObjectColor new_color = Color(new_mark_bit);
ASSERT(new_color == old_color);
#endif
-
- return false;
}
@@ -1825,6 +1863,10 @@ class RootMarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
+ // Skip the weak next code link in a code object, which is visited in
+ // ProcessTopOptimizedFrame.
+ void VisitNextCodeLink(Object** p) { }
+
private:
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -2018,7 +2060,7 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object);
+ Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
offset++;
current_cell >>= 1;
@@ -2041,8 +2083,8 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
}
Object* target = allocation->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target)->address(),
- object->address(),
+ MigrateObject(HeapObject::cast(target),
+ object,
size,
NEW_SPACE);
}
@@ -2784,19 +2826,21 @@ void MarkCompactCollector::ClearWeakCollections() {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
- Address src,
+void MarkCompactCollector::MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace dest) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(src, dst, size);
+ heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
}
- ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+ ASSERT(heap()->AllowedToBeMigrated(src, dest));
ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- Address src_slot = src;
- Address dst_slot = dst;
+ Address src_slot = src_addr;
+ Address dst_slot = dst_addr;
ASSERT(IsAligned(size, kPointerSize));
for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
@@ -2817,8 +2861,8 @@ void MarkCompactCollector::MigrateObject(Address dst,
dst_slot += kPointerSize;
}
- if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
- Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+ if (compacting_ && dst->IsJSFunction()) {
+ Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2828,21 +2872,36 @@ void MarkCompactCollector::MigrateObject(Address dst,
code_entry_slot,
SlotsBuffer::IGNORE_OVERFLOW);
}
+ } else if (compacting_ && dst->IsConstantPoolArray()) {
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
+ for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+ Address code_entry_slot =
+ dst_addr + constant_pool->OffsetOfElementAt(i);
+ Address code_entry = Memory::Address_at(code_entry_slot);
+
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ }
}
} else if (dest == CODE_SPACE) {
- PROFILE(isolate(), CodeMoveEvent(src, dst));
- heap()->MoveBlock(dst, src, size);
+ PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+ heap()->MoveBlock(dst_addr, src_addr, size);
SlotsBuffer::AddTo(&slots_buffer_allocator_,
&migration_slots_buffer_,
SlotsBuffer::RELOCATED_CODE_OBJECT,
- dst,
+ dst_addr,
SlotsBuffer::IGNORE_OVERFLOW);
- Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+ Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- heap()->MoveBlock(dst, src, size);
+ heap()->MoveBlock(dst_addr, src_addr, size);
}
- Memory::Address_at(src) = dst;
+ Memory::Address_at(src_addr) = dst_addr;
}
@@ -2977,8 +3036,8 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(),
- object->address(),
+ MigrateObject(target,
+ object,
object_size,
target_space->identity());
heap()->mark_compact_collector()->tracer()->
@@ -2994,7 +3053,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope;
+ AlwaysAllocateScope scope(isolate());
heap()->CheckNewSpaceExpansionCriteria();
NewSpace* new_space = heap()->new_space();
@@ -3026,7 +3085,7 @@ void MarkCompactCollector::EvacuateNewSpace() {
void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate());
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
p->MarkSweptPrecisely();
@@ -3056,8 +3115,8 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
Object* target_object = target->ToObjectUnchecked();
- MigrateObject(HeapObject::cast(target_object)->address(),
- object_addr,
+ MigrateObject(HeapObject::cast(target_object),
+ object,
size,
space->identity());
ASSERT(object->map_word().IsForwardingAddress());
@@ -3170,13 +3229,21 @@ enum SkipListRebuildingMode {
};
+enum FreeSpaceTreatmentMode {
+ IGNORE_FREE_SPACE,
+ ZAP_FREE_SPACE
+};
+
+
// Sweep a space precisely. After this has been done the space can
// be iterated precisely, hitting only the live objects. Code space
// is always swept precisely because we want to be able to iterate
// over it. Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+template<SweepingMode sweeping_mode,
+ SkipListRebuildingMode skip_list_mode,
+ FreeSpaceTreatmentMode free_space_mode>
static void SweepPrecisely(PagedSpace* space,
Page* p,
ObjectVisitor* v) {
@@ -3210,6 +3277,9 @@ static void SweepPrecisely(PagedSpace* space,
for ( ; live_objects != 0; live_objects--) {
Address free_end = cell_base + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
+ }
space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
@@ -3241,6 +3311,9 @@ static void SweepPrecisely(PagedSpace* space,
*cell = 0;
}
if (free_start != p->area_end()) {
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
+ }
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
@@ -3386,13 +3459,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpace();
}
- // We have to travers our allocation sites scratchpad which contains raw
- // pointers before we move objects. During new space evacauation we
- // gathered pretenuring statistics. The found allocation sites may not be
- // valid after compacting old space.
- heap()->ProcessPretenuringFeedback();
-
-
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
}
@@ -3493,12 +3559,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
space, p, &updating_visitor);
break;
case CODE_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
- space, p, &updating_visitor);
+ if (FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(
+ space, p, &updating_visitor);
+ } else {
+ SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
+ REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(
+ space, p, &updating_visitor);
+ }
break;
default:
UNREACHABLE();
@@ -3919,7 +3996,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
(mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
free_list == NULL));
- p->MarkSweptConservatively();
+ // When parallel sweeping is active, the page will be marked after
+ // sweeping by the main thread.
+ if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ p->MarkSweptConservatively();
+ }
intptr_t freed_bytes = 0;
size_t size = 0;
@@ -4009,6 +4090,7 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
if (p->TryParallelSweeping()) {
SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
free_list->Concatenate(&private_free_list);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
}
}
@@ -4031,7 +4113,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
- ASSERT(p->parallel_sweeping() == 0);
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
ASSERT(!p->IsEvacuationCandidate());
// Clear sweeping flags indicating that marking bits are still intact.
@@ -4104,7 +4186,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(1);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
space->IncreaseUnsweptFreeBytes(p);
}
break;
@@ -4114,10 +4196,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
reinterpret_cast<intptr_t>(p));
}
- if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+ if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
+ space, p, NULL);
+ } else if (space->identity() == CODE_SPACE) {
+ SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
} else {
- SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+ SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
+ space, p, NULL);
}
pages_swept++;
break;
@@ -4146,7 +4233,7 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (isolate()->num_sweeper_threads() > 0) {
+ if (AreSweeperThreadsActivated()) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
}
@@ -4161,20 +4248,22 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SequentialSweepingScope scope(this);
- SweepSpace(heap()->old_pointer_space(), how_to_sweep);
- SweepSpace(heap()->old_data_space(), how_to_sweep);
+ { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
+ { SequentialSweepingScope scope(this);
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+ SweepSpace(heap()->old_data_space(), how_to_sweep);
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE ||
- how_to_sweep == CONCURRENT_CONSERVATIVE) {
- // TODO(hpayer): fix race with concurrent sweeper
- StartSweeperThreads();
- }
+ if (how_to_sweep == PARALLEL_CONSERVATIVE ||
+ how_to_sweep == CONCURRENT_CONSERVATIVE) {
+ // TODO(hpayer): fix race with concurrent sweeper
+ StartSweeperThreads();
+ }
- if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- WaitUntilSweepingCompleted();
+ if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+ WaitUntilSweepingCompleted();
+ }
}
-
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
@@ -4196,6 +4285,25 @@ void MarkCompactCollector::SweepSpaces() {
}
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
+ p->MarkSweptConservatively();
+ }
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
+ }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+ ParallelSweepSpaceComplete(heap()->old_pointer_space());
+ ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate()->debug()->IsLoaded() ||
@@ -4290,14 +4398,33 @@ static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(),
- SlotTypeForRMode(rinfo->rmode()),
- rinfo->pc(),
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ bool success;
+ if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+ // This doesn't need to be typed since it is just a normal heap pointer.
+ Object** target_pointer =
+ reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ target_pointer,
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotsBuffer::CODE_ENTRY_SLOT,
+ rinfo->constant_pool_entry_address(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ } else {
+ success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ target_page->slots_buffer_address(),
+ SlotTypeForRMode(rmode),
+ rinfo->pc(),
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ }
+ if (!success) {
EvictEvacuationCandidate(target_page);
}
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 0773d0266..0ebe8a0f7 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -110,8 +110,7 @@ class Marking {
markbit.Next().Set();
}
- // Returns true if the the object whose mark is transferred is marked black.
- bool TransferMark(Address old_start, Address new_start);
+ void TransferMark(Address old_start, Address new_start);
#ifdef DEBUG
enum ObjectColor {
@@ -690,10 +689,14 @@ class MarkCompactCollector {
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+ INLINE(void RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object,
+ SlotsBuffer::AdditionMode mode =
+ SlotsBuffer::FAIL_ON_OVERFLOW));
- void MigrateObject(Address dst,
- Address src,
+ void MigrateObject(HeapObject* dst,
+ HeapObject* src,
int size,
AllocationSpace to_old_space);
@@ -744,6 +747,8 @@ class MarkCompactCollector {
void MarkAllocationSite(AllocationSite* site);
private:
+ class SweeperTask;
+
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector();
@@ -791,6 +796,8 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_pending_;
+ Semaphore pending_sweeper_jobs_semaphore_;
+
bool sequential_sweeping_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -940,6 +947,12 @@ class MarkCompactCollector {
void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Finalizes the parallel sweeping phase. Marks all the pages that were
+ // swept in parallel.
+ void ParallelSweepSpacesComplete();
+
+ void ParallelSweepSpaceComplete(PagedSpace* space);
+
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 3f4484a09..0077d0309 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -61,7 +61,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
Handle<String> type_handle = factory->InternalizeUtf8String(type);
@@ -82,10 +81,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
script_handle = GetScriptWrapper(loc->script());
}
- Handle<Object> stack_trace_handle = stack_trace.is_null()
- ? Handle<Object>::cast(factory->undefined_value())
- : Handle<Object>::cast(stack_trace);
-
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Handle<Object>::cast(factory->undefined_value())
: Handle<Object>::cast(stack_frames);
@@ -96,7 +91,6 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
start,
end,
script_handle,
- stack_trace_handle,
stack_frames_handle);
return message;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 5d84e46ca..2f4be518b 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -95,7 +95,6 @@ class MessageHandler {
const char* type,
MessageLocation* loc,
Vector< Handle<Object> > args,
- Handle<String> stack_trace,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index e9f1ae46c..a389bb8fe 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -45,10 +45,6 @@ var kMessages = {
unterminated_regexp: ["Invalid regular expression: missing /"],
regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
multiple_defaults_in_switch: ["More than one default clause in switch statement"],
newline_after_throw: ["Illegal newline after throw"],
redeclaration: ["%0", " '", "%1", "' has already been declared"],
@@ -64,7 +60,6 @@ var kMessages = {
not_defined: ["%0", " is not defined"],
non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
with_expression: ["%0", " has no properties"],
illegal_invocation: ["Illegal invocation"],
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
@@ -108,6 +103,7 @@ var kMessages = {
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
+ not_a_symbol: ["%0", " is not a symbol"],
not_a_promise: ["%0", " is not a promise"],
resolver_not_a_function: ["Promise resolver ", "%0", " is not a function"],
promise_cyclic: ["Chaining cycle detected for promise ", "%0"],
@@ -120,7 +116,7 @@ var kMessages = {
invalid_string_length: ["Invalid string length"],
invalid_typed_array_offset: ["Start offset is too large:"],
invalid_typed_array_length: ["Invalid typed array length"],
- invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ invalid_typed_array_alignment: ["%0", " of ", "%1", " should be a multiple of ", "%2"],
typed_array_set_source_too_large:
["Source is too large"],
typed_array_set_negative_offset:
@@ -133,6 +129,11 @@ var kMessages = {
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
invalid_count_value: ["Invalid count value"],
+ // ReferenceError
+ invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
+ invalid_lhs_in_for: ["Invalid left-hand side in for-loop"],
+ invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
+ invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
// SyntaxError
paren_in_arg_string: ["Function arg string contains parenthesis"],
not_isvar: ["builtin %IS_VAR: not a variable"],
@@ -155,9 +156,9 @@ var kMessages = {
invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_eval_arguments: ["Unexpected eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
- too_many_variables: ["Too many variables declared (only 131071 allowed)"],
+ too_many_arguments: ["Too many arguments in function call (only 65535 allowed)"],
+ too_many_parameters: ["Too many parameters in function definition (only 65535 allowed)"],
+ too_many_variables: ["Too many variables declared (only 4194303 allowed)"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_octal_literal: ["Octal literals are not allowed in strict mode."],
strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
@@ -176,7 +177,8 @@ var kMessages = {
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
harmony_const_assign: ["Assignment to constant variable."],
- symbol_to_string: ["Conversion from symbol to string"],
+ symbol_to_string: ["Cannot convert a Symbol value to a string"],
+ symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"],
invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
module_type_error: ["Module '", "%0", "' used improperly"],
module_export_undefined: ["Export '", "%0", "' is not defined in module"]
@@ -786,11 +788,10 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-//TODO(rossberg)
-var CallSiteReceiverKey = NEW_PRIVATE("receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("function");
-var CallSitePositionKey = NEW_PRIVATE("position");
-var CallSiteStrictModeKey = NEW_PRIVATE("strict mode");
+var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
function CallSite(receiver, fun, pos, strict_mode) {
SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -939,14 +940,10 @@ function CallSiteToString() {
if (this.isNative()) {
fileLocation = "native";
} else {
- if (this.isEval()) {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
- } else {
- fileName = this.getFileName();
+ fileName = this.getScriptNameOrSourceURL();
+ if (!fileName && this.isEval()) {
+ fileLocation = this.getEvalOrigin();
+ fileLocation += ", "; // Expecting source position to follow.
}
if (fileName) {
@@ -1077,15 +1074,15 @@ function FormatErrorString(error) {
function GetStackFrames(raw_stack) {
var frames = new InternalArray();
- var non_strict_frames = raw_stack[0];
+ var sloppy_frames = raw_stack[0];
for (var i = 1; i < raw_stack.length; i += 4) {
var recv = raw_stack[i];
var fun = raw_stack[i + 1];
var code = raw_stack[i + 2];
var pc = raw_stack[i + 3];
var pos = %FunctionGetPositionForOffset(code, pc);
- non_strict_frames--;
- frames.push(new CallSite(recv, fun, pos, (non_strict_frames < 0)));
+ sloppy_frames--;
+ frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
}
return frames;
}
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 514b3aaa4..f7f435413 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -128,7 +128,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -156,6 +156,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
return Assembler::kSpecialTargetSize;
}
@@ -163,7 +169,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -179,21 +185,22 @@ Address Assembler::target_address_from_return_address(Address pc) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -205,7 +212,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -260,13 +267,14 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + Assembler::kInstrSize));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ host_,
stub->instruction_start());
}
@@ -277,7 +285,7 @@ Address RelocInfo::call_address() {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -287,7 +295,7 @@ void RelocInfo::set_call_address(Address target) {
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -318,7 +326,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index f551dd5e1..b659559fe 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -213,6 +213,11 @@ bool RelocInfo::IsCodedSpecially() {
}
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -313,11 +318,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
trampoline_pool_blocked_nesting_ = 0;
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
- trampoline_emitted_ = false;
+ trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
@@ -2321,6 +2327,20 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
}
}
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 70f77eaed..ea956e135 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -37,6 +37,7 @@
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
+
#include "assembler.h"
#include "constants-mips.h"
#include "serialize.h"
@@ -526,6 +527,26 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
+ // On MIPS there is no Constant Pool so we skip that parameter.
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool)) {
+ return target_address_at(pc);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target)) {
+ set_target_address_at(pc, target);
+ }
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -539,9 +560,10 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
+ Address instruction_payload, Code* code, Address target) {
set_target_address_at(
instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code,
target);
}
@@ -984,6 +1006,12 @@ class Assembler : public AssemblerBase {
void CheckTrampolinePool();
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 7a097a35a..03d6cc80d 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -163,10 +163,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// Tail call a stub.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -335,7 +332,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -345,10 +342,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : allocation site or undefined
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -356,6 +355,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
Isolate* isolate = masm->isolate();
// ----------- S t a t e -------------
@@ -369,6 +374,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(a2, a3);
+ __ push(a2);
+ }
+
// Preserve the two incoming parameters on the stack.
__ sll(a0, a0, kSmiTagSize); // Tag arguments count.
__ MultiPushReversed(a0.bit() | a1.bit());
@@ -417,7 +427,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a1, a2, a1); // a1 = Constructor.
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ Pop(a1, a2);
@@ -428,13 +438,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a2: initial map
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size
+ // a3: object size (not including memento if create_memento)
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -449,19 +463,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words)
+ // a3: object size (in words, including memento if create_memento)
// t4: JSObject (not tagged)
// t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
__ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
__ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(a0, t5, t0);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a0, t5, at);
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(t6, t4, Operand(at)); // End of object.
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
@@ -470,8 +485,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t5, t6, t7);
+ } else if (create_memento) {
+ __ Subu(t7, a3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ sll(at, t7, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // Fill in memento fields.
+ // t5: points to the allocated but uninitialized memento.
+ __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ // Load the AllocationSite.
+ __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
}
- __ InitializeFieldsWithFiller(t5, t6, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
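
For orientation, a plain-C++ sketch of the layout the block above produces
when create_memento is true; the constants are illustrative stand-ins for the
real V8 values, and the three header words are assumed to be filled already.

    #include <cstdint>
    #include <vector>

    // In-object fields become undefined; an AllocationMemento (map word,
    // then the AllocationSite) sits directly behind the object.
    void FillObjectWithMemento(std::vector<uintptr_t>& words,
                               uintptr_t undefined_value,
                               uintptr_t memento_map,
                               uintptr_t allocation_site) {
      const int kHeaderWords = 3;   // map, properties, elements
      const int kMementoWords = 2;  // memento map + allocation site
      const int object_end = static_cast<int>(words.size()) - kMementoWords;
      for (int i = kHeaderWords; i < object_end; ++i) {
        words[i] = undefined_value;
      }
      words[object_end] = memento_map;          // kMapOffset == 0
      words[object_end + 1] = allocation_site;  // kAllocationSiteOffset
    }
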
@@ -575,15 +613,48 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ UndoAllocationInNewSpace(t4, t5);
}
- __ bind(&rt_call);
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ __ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ push(a2);
+ }
+
__ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(t4, v0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count again here.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// t4: JSObject
+
+ if (create_memento) {
+ __ lw(a2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ __ Branch(&count_incremented, eq, a2, Operand(t5));
+ // a2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ lw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sw(a3, FieldMemOperand(a2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ bind(&allocated);
__ Push(t4, t4);
@@ -685,17 +756,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -757,9 +828,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(a0, a3);
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ li(a2, Operand(undefined_sentinel));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -785,7 +854,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -798,7 +867,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ Pop(a1);
}
@@ -907,7 +976,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -933,7 +1002,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -1015,7 +1084,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1067,7 +1136,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
@@ -1270,7 +1339,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&push_receiver, ne, t3, Operand(zero_reg));
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(a0, &call_to_object);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ Branch(&use_global_receiver, eq, a0, Operand(a1));
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e38f18191..332ed4b6a 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -46,7 +46,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -77,7 +77,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -88,7 +88,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -99,15 +100,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a2, a3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -142,7 +143,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -166,6 +167,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -227,7 +248,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -255,7 +276,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -366,7 +387,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -1586,21 +1607,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ andi(scratch, value, 0xf);
- __ Branch(oom_label, eq, scratch, Operand(0xf));
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
// v0: result parameter for PerformGC, if any
@@ -1703,17 +1712,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
__ Branch(&retry, eq, t0, Operand(zero_reg));
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
-
// Clear the pending exception.
__ li(a3, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
@@ -1767,13 +1770,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -1781,7 +1782,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -1791,29 +1791,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ li(a0, Operand(false, RelocInfo::NONE32));
- __ li(a2, Operand(external_caught));
- __ sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, v0, t0, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(v0, MemOperand(a2));
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, v0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
@@ -2204,108 +2189,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a0,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = a0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -----------------------------------
- __ Branch(&miss, ne, a1,
- Operand(masm->isolate()->factory()->length_string()));
- receiver = a2;
- value = a0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : key
- // -----------------------------------
- receiver = a1;
- value = a0;
- }
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return a0; }
@@ -2365,7 +2248,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2387,11 +2270,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2455,7 +2338,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
@@ -2464,7 +2347,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2505,7 +2388,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
@@ -2523,7 +2406,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
@@ -2606,7 +2489,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -2646,7 +2529,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
@@ -2656,7 +2539,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(t0, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
@@ -2675,7 +2558,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -2704,7 +2587,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -2713,7 +2596,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -3105,7 +2988,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3152,83 +3035,101 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : Feedback vector
+ // a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a3.
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ // Load the cache state into t0.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ Branch(&done, eq, a3, Operand(a1));
-
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in a3.
- __ lw(t1, FieldMemOperand(a3, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&megamorphic, ne, a1, Operand(a3));
- __ jmp(&done);
+ __ Branch(&done, eq, t0, Operand(a1));
+
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in a3.
+ __ lw(t1, FieldMemOperand(t0, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, t1, Operand(at));
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialize, eq, a3, Operand(at));
+ __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+ __ Branch(&initialize, eq, t0, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(a3);
- __ Branch(&not_array_function, ne, a1, Operand(a3));
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ Branch(&not_array_function, ne, a1, Operand(t0));
+
+ // The target function is the Array constructor,
+ // Create an AllocationSite if we don't already have it, store it in the
+ // slot.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ const RegList kSavedRegs =
+ 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
- // The target function is the Array constructor.
- // Create an AllocationSite if we don't already have it, store it in the cell.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs =
- 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(a0);
+ __ MultiPush(kSavedRegs);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(a0);
- __ MultiPush(kSavedRegs);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
+ }
+ __ Branch(&done);
- __ MultiPop(kSavedRegs);
- __ SmiUntag(a0);
+ __ bind(&not_array_function);
}
- __ Branch(&done);
- __ bind(&not_array_function);
- __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sw(a1, MemOperand(t0, 0));
+
+ __ Push(t0, a2, a1);
+ __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(t0, a2, a1);
__ bind(&done);
}
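
The slot value arrives in a3 as a Smi, so the repeated sll/Addu/FieldMemOperand
sequence above amounts to the following sketch (32-bit MIPS; the constants are
illustrative):

    #include <cstdint>

    // Byte address of feedback-vector slot `slot_smi` (a Smi, i.e. value << 1).
    // Shifting by kPointerSizeLog2 - kSmiTagSize turns the Smi directly into
    // slot * kPointerSize without untagging it first.
    uintptr_t FeedbackSlotAddress(uintptr_t vector, uintptr_t slot_smi) {
      const int kPointerSizeLog2 = 2;       // 4-byte pointers
      const int kSmiTagSize = 1;            // Smi encoding: value << 1
      const int kHeapObjectTag = 1;         // tagged pointers have bit 0 set
      const int kFixedArrayHeaderSize = 8;  // map + length
      uintptr_t byte_offset = slot_smi << (kPointerSizeLog2 - kSmiTagSize);
      return vector + byte_offset + kFixedArrayHeaderSize - kHeapObjectTag;
    }
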
@@ -3236,7 +3137,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3245,11 +3148,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_function);
// Goto slow case if we do not have a function.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in a2, we need
+ // to set a2 to undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
}
}
@@ -3269,7 +3176,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&cont, ne, at, Operand(zero_reg));
}
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ lw(a3, MemOperand(sp, argc_ * kPointerSize));
if (NeedsChecks()) {
@@ -3290,14 +3197,16 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
+ // object (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, Operand(t1));
+ __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+ __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize));
}
// Check for function proxy.
- __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
__ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
__ li(a2, Operand(0, RelocInfo::NONE32));
@@ -3337,21 +3246,42 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
- // a2 : cache cell for call target
+ // a2 : feedback vector
+ // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t0, t0);
+ __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into a2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by a3 + 1.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(a2, t1);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = a3;
+ Register jmp_reg = t0;
__ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
@@ -3360,10 +3290,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a3: object type
+ // t0: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3441,7 +3371,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
@@ -3465,7 +3395,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, v0);
@@ -3900,7 +3830,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -4065,7 +3995,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4576,7 +4506,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -5008,7 +4938,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -5021,13 +4951,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -5041,18 +4971,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5361,7 +5283,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ Addu(a3, a3, Operand(1));
if (FLAG_debug_code) {
@@ -5468,46 +5390,33 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
// -- a1 : constructor
- // -- a2 : type info cell
+ // -- a2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ SmiTst(a3, at);
+ __ SmiTst(t0, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
- __ GetObjectType(a3, a3, t0);
+ __ GetObjectType(t0, t0, t1);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction,
- t0, Operand(MAP_TYPE));
+ t1, Operand(MAP_TYPE));
- // We should either have undefined in a2 or a valid cell.
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&okay_here, eq, a2, Operand(at));
- __ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
- a3, Operand(cell_map));
- __ bind(&okay_here);
+ // We should either have undefined in a2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(a2, t0);
}
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ lw(a2, FieldMemOperand(a2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ lw(t0, FieldMemOperand(a2, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&no_info, ne, t0, Operand(at));
__ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
@@ -5615,7 +5524,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5682,15 +5591,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
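
A minimal sketch of the slot selection introduced above; the parameters mirror
FunctionCallbackArguments::kArgsLength and kReturnValueOffset but are passed in
explicitly to keep the example self-contained.

    // Store callbacks hand back their first JS argument, which lives just past
    // the FunctionCallbackArguments block; everything else uses the normal
    // return-value slot. The result is the word offset applied to fp above.
    int ReturnValueOffsetInWords(bool is_store, int fca_args_length,
                                 int fca_return_value_offset) {
      return is_store ? 2 + fca_args_length : 2 + fca_return_value_offset;
    }
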
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 8d65d5b05..e71c30583 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -367,7 +367,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 1535231dd..b9bf69db4 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -274,9 +274,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), 0);
}
@@ -295,9 +296,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments (not smi)
// -- a1 : constructor function
- // -- a2 : cache cell for call target
+ // -- a2 : feedback array
+ // -- a3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
}
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 6bd9ba7b7..0cd5e2ccd 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -49,13 +49,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
@@ -371,6 +394,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index d9c0c798a..0ec2cbb86 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -176,6 +176,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP.
static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+ static const int kConstantPoolOffset = 0; // Not used.
};
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 18ee02dc5..87c0764b6 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -120,6 +120,24 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ Register stack_limit_scratch,
+ int pointers = 0,
+ Register scratch = sp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(sp) == (pointers == 0));
+ if (pointers != 0) {
+ __ Subu(scratch, sp, Operand(pointers * kPointerSize));
+ }
+ __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, scratch, Operand(stack_limit_scratch));
+ PredictableCodeSizeScope predictable(masm_, 4 * Assembler::kInstrSize);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
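
A plain-C++ sketch of the condition the helper above emits: when pointers is
non-zero the probe point is taken pointers * kPointerSize below sp, otherwise
sp itself is compared against the stack limit.

    #include <cstdint>

    // True when the generated code would fall through into the call to the
    // StackCheck builtin; the emitted branch skips it on hs (unsigned >=).
    bool NeedsStackCheckCall(uintptr_t sp, uintptr_t stack_limit, int pointers) {
      const int kPointerSize = 4;  // 32-bit MIPS
      uintptr_t probe =
          pointers != 0 ? sp - static_cast<uintptr_t>(pointers) * kPointerSize
                        : sp;
      return probe < stack_limit;
    }
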
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -138,6 +156,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -152,10 +173,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(at, MemOperand(sp, receiver_offset));
@@ -184,22 +205,30 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, a2, locals_count, t5);
+ }
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
- if ((FLAG_optimize_for_size && locals_count > 4) ||
- !is_int16(locals_count)) {
- Label loop;
- __ Subu(a2, sp, Operand(locals_count * kPointerSize));
- __ bind(&loop);
- __ Subu(sp, sp, Operand(kPointerSize));
- __ Branch(&loop, gt, sp, Operand(a2), USE_DELAY_SLOT);
- __ sw(t5, MemOperand(sp, 0)); // Push in the delay slot.
- } else {
- __ Subu(sp, sp, Operand(locals_count * kPointerSize));
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ li(a2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ __ Subu(sp, sp, Operand(kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
__ sw(t5, MemOperand(sp, i * kPointerSize));
}
+ // Continue loop if not done.
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ __ Subu(sp, sp, Operand(remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ sw(t5, MemOperand(sp, i * kPointerSize));
}
}
}
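
The batching arithmetic used above, as a sketch: with 70 locals and the default
kMaxPushes of 32 this yields 2 full batches plus 6 trailing pushes.

    #include <utility>

    // Returns {batches pushed inside the loop, pushes emitted after the loop}.
    std::pair<int, int> PlanLocalPushes(int locals_count, bool optimize_for_size) {
      const int kMaxPushes = optimize_for_size ? 4 : 32;
      return {locals_count / kMaxPushes, locals_count % kMaxPushes};
    }
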
@@ -214,13 +243,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in v0. It replaces the context passed to us.
@@ -270,12 +299,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -301,7 +330,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -310,11 +339,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_, at);
}
{ Comment cmnt(masm_, "[ Body");
@@ -679,7 +704,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -802,7 +827,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -852,7 +877,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
__ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -908,7 +933,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -980,7 +1005,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -988,7 +1013,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1044,7 +1069,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1087,6 +1112,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1172,13 +1198,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ li(a1, cell);
- __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ li(a1, FeedbackVector());
+ __ li(a2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1338,7 +1364,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1346,7 +1372,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(v0);
}
@@ -1368,7 +1394,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1380,7 +1406,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1421,7 +1447,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1457,19 +1483,18 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ Branch(done);
@@ -1486,7 +1511,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
@@ -1499,9 +1524,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1533,7 +1557,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1542,18 +1566,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
__ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
@@ -1566,15 +1590,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(v0);
}
@@ -1606,7 +1630,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ li(a2, Operand(expr->pattern()));
__ li(a1, Operand(expr->flags()));
__ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(t1, v0);
__ bind(&materialized);
@@ -1618,7 +1642,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(t1);
__ bind(&allocated);
@@ -1659,12 +1683,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1703,7 +1726,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(a0, result_register());
__ li(a2, Operand(key->value()));
__ lw(a1, MemOperand(sp));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1818,7 +1841,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1879,13 +1902,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2024,7 +2043,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2092,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(v0); // result
EmitReturnSequence();
@@ -2111,7 +2130,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ lw(a1, MemOperand(sp, kPointerSize));
__ lw(a0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
@@ -2147,7 +2166,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in a0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2229,7 +2248,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2244,14 +2263,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(a0);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(a1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2269,7 +2288,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ lw(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2309,7 +2328,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2337,8 +2356,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2417,20 +2435,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2456,7 +2468,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(a1, result_register());
__ pop(a0); // Restore value.
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2465,7 +2477,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
__ Pop(a0, a2); // a0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2476,43 +2488,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ sw(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ Move(a3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ li(a1, Operand(name));
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
__ lw(a1, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ lw(a1, StackOperand(var));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a1, Operand(t0));
- __ sw(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2522,23 +2549,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ sw(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(a3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2546,23 +2569,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
- // Perform the assignment.
- __ sw(v0, location);
- if (var->IsContextSlot()) {
- __ mov(a3, v0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
- // Non-initializing assignments to consts are ignored.
+ // Non-initializing assignments to consts are ignored.
}
@@ -2578,7 +2588,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(a2, Operand(prop->key()->AsLiteral()->value()));
__ pop(a1);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2598,10 +2608,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ mov(a0, result_register());
__ Pop(a2, a1); // a1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2628,10 +2638,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, id);
}
@@ -2650,7 +2658,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2741,15 +2749,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2769,15 +2777,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
- // t0: the language mode.
- __ li(t0, Operand(Smi::FromInt(language_mode())));
+ // t0: the strict mode.
+ __ li(t0, Operand(Smi::FromInt(strict_mode())));
// a1: the start position of the scope the call resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(t2, t1, t0, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2793,8 +2801,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2849,7 +2857,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2928,10 +2936,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ li(a2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ li(a2, FeedbackVector());
+ __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3409,7 +3424,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
@@ -3506,7 +3521,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(v0);
}
@@ -3897,7 +3912,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(v0);
@@ -4178,8 +4193,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4242,9 +4257,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ li(a1, Operand(Smi::FromInt(strict_mode())));
__ push(a1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4252,11 +4265,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ li(a0, Operand(Smi::FromInt(SLOPPY)));
__ Push(a2, a1, a0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(v0);
@@ -4270,7 +4283,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(v0);
}
} else {
@@ -4345,16 +4358,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4471,9 +4479,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4503,7 +4509,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(a0, result_register()); // Value.
__ li(a2, Operand(prop->key()->AsLiteral()->value())); // Name.
__ pop(a1); // Receiver.
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4517,10 +4523,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
__ Pop(a2, a1); // a1 = key, a2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4540,7 +4546,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4549,6 +4555,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4558,7 +4565,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ li(a0, Operand(proxy->name()));
__ Push(cp, a0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4705,7 +4712,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
@@ -4739,7 +4746,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 14d1cd682..09ffe95c0 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -229,7 +229,8 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ Branch(slow, ne, at, Operand(zero_reg));
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -338,8 +339,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -347,9 +347,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -419,6 +417,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register scratch3,
Label* unmapped_case,
Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
// Check that the receiver is a JSObject. Because of the map check
// later, we do not need to check for interceptors or whether it
// requires access checks.
@@ -432,10 +432,11 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1,
scratch2,
- Heap::kNonStrictArgumentsElementsMapRootIndex,
+ arguments_map,
slow_case,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
@@ -498,7 +499,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- a0 : key
@@ -523,7 +524,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -649,7 +650,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
@@ -802,7 +803,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -994,7 +995,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1180,8 +1181,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1190,9 +1190,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1240,7 +1238,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index f033f6d34..970a1bfc2 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -146,11 +146,11 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -207,7 +207,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both v0. It replaces the context passed to us.
@@ -260,6 +260,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -274,7 +277,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -407,7 +411,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ li(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ lw(scratch, ToMemOperand(op));
return scratch;
}
@@ -443,7 +447,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
__ ldc1(dbl_scratch, mem_op);
return dbl_scratch;
@@ -661,10 +665,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -866,6 +866,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1062,174 +1070,180 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
// Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
+ __ subu(dividend, zero_reg, dividend);
+ __ And(dividend, dividend, Operand(mask));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
}
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ subu(dividend, zero_reg, dividend);
+ }
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
- } else {
- const Register scratch = scratch0();
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
+ __ bind(&dividend_is_not_negative);
+ __ And(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
- // div runs in the background while we check for special cases.
- Register right_reg = EmitLoadRegister(instr->right(), scratch);
- __ div(left_reg, right_reg);
- Label done;
- // Check for x % 0, we have to deopt in this case because we can't return a
- // NaN.
- if (right->CanBeZero()) {
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for kMinInt % -1, we have to deopt if we care about -0, because we
- // can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
- // TODO(svenpanne) Don't deopt when we don't care about -0.
- DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ Mul(result, result, Operand(Abs(divisor)));
+ __ Subu(result, dividend, Operand(result));
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+ __ bind(&remainder_not_zero);
+ }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ const Register left_reg = ToRegister(instr->left());
+ const Register right_reg = ToRegister(instr->right());
+ const Register result_reg = ToRegister(instr->result());
+
+ // div runs in the background while we check for special cases.
+ __ div(left_reg, right_reg);
- // TODO(svenpanne) Only emit the test/deopt if we have to.
- __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
- __ mfhi(result_reg);
+ Label done;
+ // Check for x % 0, we have to deopt in this case because we can't return a
+ // NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+ }
+ // Check for kMinInt % -1, div will return kMinInt, which is not what we
+ // want. We have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+ DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+ } else {
+ __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(result_reg, zero_reg);
}
- __ bind(&done);
+ __ bind(&no_overflow_possible);
+ }
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
+ __ mfhi(result_reg);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
+ __ bind(&done);
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, at, no_reg));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ And(at, dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ Subu(result, zero_reg, dividend);
+ return;
+ }
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ Move(result, dividend);
+ } else if (shift == 1) {
+ __ srl(result, dividend, 31);
+ __ Addu(result, dividend, Operand(result));
+ } else {
+ __ sra(result, dividend, 31);
+ __ srl(result, result, 32 - shift);
+ __ Addu(result, dividend, Operand(result));
+ }
+ if (shift > 0) __ sra(result, result, shift);
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+}
+
- uint32_t divisor_abs = abs(divisor);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
- DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
- }
- // Compute the remainder.
- __ Move(remainder, zero_reg);
- return;
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ sra(scratch, dividend, power - 1);
- }
- __ srl(scratch, scratch, 32 - power);
- __ Addu(scratch, dividend, Operand(scratch));
- __ sra(result, scratch, power);
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ Subu(result, zero_reg, Operand(result));
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sll(scratch, result, power);
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ sll(scratch, result, power);
- __ Addu(remainder, dividend, Operand(scratch));
- }
- return;
- } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ li(scratch, Operand(M));
- __ mult(dividend, scratch);
- __ mfhi(scratch);
- if (M < 0) {
- __ Addu(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ sra(scratch, scratch, s);
- __ mov(scratch, scratch);
- }
- __ srl(at, dividend, 31);
- __ Addu(result, scratch, Operand(at));
- if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
- // Compute the remainder.
- __ li(scratch, Operand(divisor));
- __ Mul(scratch, result, Operand(scratch));
- __ Subu(remainder, dividend, Operand(scratch));
- } else {
- __ li(scratch, Operand(divisor));
- __ div(dividend, scratch);
- __ mfhi(remainder);
- __ mflo(result);
- }
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ Mul(scratch0(), result, Operand(divisor));
+ __ Subu(scratch0(), scratch0(), dividend);
+ DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
}
}
void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
const Register left = ToRegister(instr->left());
const Register right = ToRegister(instr->right());
const Register result = ToRegister(instr->result());
@@ -1239,12 +1253,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ div(left, right);
// Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, left, Operand(zero_reg));
DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
@@ -1252,18 +1266,32 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
__ bind(&left_not_min_int);
}
- if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ if (hdiv->IsMathFloorOfDiv()) {
+ // We performed a truncating division. Correct the result if necessary.
+ Label done;
+ Register remainder = scratch0();
+ __ mfhi(remainder);
+ __ mflo(result);
+ __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+ __ Xor(remainder, remainder, Operand(right));
+ __ Branch(&done, ge, remainder, Operand(zero_reg));
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
__ mfhi(result);
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
+ __ mflo(result);
+ } else {
+ __ mflo(result);
}
- __ mflo(result);
}
@@ -1279,67 +1307,94 @@ void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+ Register scratch = scratch0();
+ ASSERT(!scratch.is(dividend));
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ uint16_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sra(result, dividend, shift);
+ return;
+ }
- if (instr->right()->IsConstantOperand()) {
- Label done;
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+ // If the divisor is negative, we have to negate and handle edge cases.
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ __ Move(scratch, dividend);
+ }
+ __ Subu(result, zero_reg, dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ __ Xor(at, scratch, result);
+ if (divisor == -1) {
+ DeoptimizeIf(ge, instr->environment(), at, Operand(zero_reg));
+ __ sra(result, dividend, shift);
+ } else {
+ Label no_overflow, done;
+ __ Branch(&no_overflow, lt, at, Operand(zero_reg));
+ __ li(result, Operand(kMinInt / divisor));
+ __ Branch(&done);
+ __ bind(&no_overflow);
+ __ sra(result, dividend, shift);
+ __ bind(&done);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(divisor));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
} else {
- Label done;
- const Register right = ToRegister(instr->right());
-
- // On MIPS div is asynchronous - it will run in the background while we
- // check for special cases.
- __ div(left, right);
+ __ sra(result, dividend, shift);
+ }
+}
- // Check for x / 0.
- DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
- DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
- __ bind(&left_not_zero);
- }
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
- __ bind(&left_not_min_int);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- __ mfhi(remainder);
- __ mflo(result);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+ }
- // We performed a truncating division. Correct the result if necessary.
- __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
- __ Xor(scratch , remainder, Operand(right));
- __ Branch(&done, ge, scratch, Operand(zero_reg));
- __ Subu(result, result, Operand(1));
- __ bind(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+ dividend, Operand(zero_reg));
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ Subu(result, zero_reg, result);
+ __ Subu(result, result, Operand(1));
+ __ bind(&done);
}
@@ -1465,7 +1520,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, at));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1587,7 +1642,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1597,9 +1652,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
} else { // can_overflow.
Register overflow = scratch0();
Register scratch = scratch1();
- if (right->IsStackSlot() ||
- right->IsArgument() ||
- right->IsConstantOperand()) {
+ if (right->IsStackSlot() || right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ SubuAndCheckForOverflow(ToRegister(result),
ToRegister(left),
@@ -1779,7 +1832,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (!can_overflow) {
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, at);
__ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
} else {
@@ -1790,7 +1843,6 @@ void LCodeGen::DoAddI(LAddI* instr) {
Register overflow = scratch0();
Register scratch = scratch1();
if (right->IsStackSlot() ||
- right->IsArgument() ||
right->IsConstantOperand()) {
Register right_reg = EmitLoadRegister(right, scratch);
__ AdduAndCheckForOverflow(ToRegister(result),
@@ -3088,7 +3140,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3447,7 +3499,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3536,7 +3588,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
@@ -3801,6 +3853,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Clz(result, input);
+}
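
DoMathClz32 maps directly onto the MIPS clz instruction, which by the MIPS32 spec returns 32 for a zero input, unlike GCC's __builtin_clz, whose result is undefined for 0 (the simulator change further down handles exactly that difference). A small C++ sketch of the intended semantics:

    #include <cstdint>

    // Semantics of Math.clz32 / the MIPS clz instruction: count of leading
    // zero bits of the 32-bit value, with the zero case pinned to 32.
    uint32_t Clz32(uint32_t x) {
      return x == 0 ? 32u : static_cast<uint32_t>(__builtin_clz(x));
    }
    // Clz32(0) == 32, Clz32(1) == 31, Clz32(0x80000000u) == 0.
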
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
@@ -3881,8 +3940,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ li(a0, Operand(instr->arity()));
// No cell in a2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ li(a2, Operand(undefined_value));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3894,7 +3952,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- __ li(a2, Operand(factory()->undefined_value()));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3974,12 +4032,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+
+      // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4009,9 +4076,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4053,8 +4117,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in a2.
__ li(a2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4185,7 +4248,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4310,7 +4373,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = (instr->strict_mode() == STRICT)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4421,7 +4484,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
@@ -4497,22 +4560,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- Register scratch = scratch0();
-
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
- DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4523,28 +4570,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- Register scratch = scratch0();
- __ And(scratch, ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4568,9 +4604,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4587,18 +4625,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
DoubleRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4615,37 +4654,41 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, t1);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ Branch(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, zero_reg);
+
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ Subu(v0, v0, kHeapObjectTag);
+ __ StoreToSafepointRegisterSlot(v0, dst);
+ }
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4694,11 +4737,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
@@ -4707,8 +4750,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ And(at, input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTagCheckOverflow(output, input, at);
+ DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+ } else {
+ __ SmiTag(output, input);
+ }
}
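
Both checks above follow from 32-bit smis carrying a 31-bit signed payload (the value shifted left by one, with a zero tag bit). A sketch of the two overflow conditions, assuming that layout (not the V8 macros themselves):

    #include <cstdint>
    #include <optional>

    // Tagging a signed int32: fits iff the value survives a 1-bit left shift,
    // i.e. it lies in [-2^30, 2^30 - 1]; otherwise DoSmiTag deoptimizes.
    std::optional<int32_t> SmiTagInt32(int32_t v) {
      if (v < -(1 << 30) || v > (1 << 30) - 1) return std::nullopt;
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    }

    // Tagging a uint32: fits only if bits 30 and 31 are clear, which is the
    // 0xc0000000 mask test emitted above.
    std::optional<int32_t> SmiTagUint32(uint32_t v) {
      if (v & 0xc0000000u) return std::nullopt;
      return static_cast<int32_t>(v << 1);
    }
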
@@ -5169,6 +5225,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DoubleRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ FmoveHigh(result_reg, value_reg);
+ } else {
+ __ FmoveLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ __ Move(result_reg, lo_reg, hi_reg);
+}
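
DoDoubleBits and DoConstructDouble simply move between a double and its two raw 32-bit halves (FmoveHigh/FmoveLow and the two-register Move). The portable equivalent, assuming IEEE-754 doubles and the usual hi/lo word split, is roughly:

    #include <cstdint>
    #include <cstring>

    // Raw bit access to a double: hi word = bits 63..32, lo word = bits 31..0.
    void DoubleBits(double value, uint32_t* hi, uint32_t* lo) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      *hi = static_cast<uint32_t>(bits >> 32);
      *lo = static_cast<uint32_t>(bits);
    }

    double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double value;
      std::memcpy(&value, &bits, sizeof value);
      return value;
    }
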
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5276,7 +5351,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5310,7 +5385,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ li(t1, Operand(instr->hydrogen()->pattern()));
__ li(t0, Operand(instr->hydrogen()->flags()));
__ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(a1, v0);
__ bind(&materialized);
@@ -5323,7 +5398,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ li(a0, Operand(Smi::FromInt(size)));
__ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(a1);
__ bind(&allocated);
@@ -5348,7 +5423,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5357,7 +5432,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ li(a1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5545,7 +5620,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5581,7 +5656,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5617,10 +5692,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 1e572bc95..63f0661ae 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -124,9 +124,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -161,9 +163,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index d423040a0..752f67673 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -839,7 +839,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1113,6 +1112,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1128,6 +1128,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1248,14 +1255,61 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
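
The three conditions that attach an environment above are exactly the cases a plain shift cannot express without a deopt: a -0 result (zero dividend, negative divisor), kMinInt divided by -1 overflowing int32, and a lossy quotient when not every use truncates. For a positive power-of-two divisor the shift itself, with the bias that makes it truncate rather than floor, looks like this (a sketch of the usual technique, not the LDivByPowerOf2I codegen):

    #include <cstdint>

    // Truncating x / 2^shift via an arithmetic shift; negative dividends get
    // a (2^shift - 1) bias first so the shift rounds toward zero.
    int32_t DivByPowerOf2(int32_t x, int shift) {
      uint32_t bias = (x < 0) ? ((1u << shift) - 1) : 0u;
      int32_t biased = static_cast<int32_t>(static_cast<uint32_t>(x) + bias);
      return biased >> shift;  // arithmetic shift assumed, as with MIPS sra
    }
    // DivByPowerOf2(-7, 2) == -1, DivByPowerOf2(7, 2) == 1.
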
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* div = new(zone()) LDivI(dividend, divisor);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LDivI* div = new(zone()) LDivI(dividend, divisor);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1264,72 +1318,99 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer's Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
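
The environment here guards JavaScript's -0 case: a negative dividend whose remainder masks to zero. The usual masking form of the operation that the bailout condition refers to is roughly (a sketch, not the LModByPowerOf2I codegen):

    #include <cstdint>

    // Truncating x % 2^k via masking; the result keeps the dividend's sign.
    int32_t ModByPowerOf2(int32_t x, uint32_t mask /* 2^k - 1 */) {
      if (x >= 0) return static_cast<int32_t>(static_cast<uint32_t>(x) & mask);
      uint32_t magnitude = 0u - static_cast<uint32_t>(x);  // |x| without overflow
      int32_t r = -static_cast<int32_t>(magnitude & mask);
      // r == 0 with x < 0 is the -0 result that forces the deopt above.
      return r;
    }
    // ModByPowerOf2(-7, 3) == -3, ModByPowerOf2(7, 3) == 3.
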
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- TempRegister(),
- FixedTemp(f20),
- FixedTemp(f22));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1)) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- }
+ return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
} else {
@@ -1774,25 +1855,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1868,6 +1951,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2124,11 +2221,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2138,8 +2233,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
+ if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 39e269184..ae59e57f2 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
+ V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -125,13 +130,15 @@ class LCodeGen;
V(MapEnumLength) \
V(MathAbs) \
V(MathExp) \
+ V(MathClz32) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -171,7 +178,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -613,42 +619,94 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- // Used when the right hand is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- // Used for the standard case.
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
@@ -660,26 +718,46 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -802,6 +880,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -1863,19 +1953,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1888,38 +1965,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2004,6 +2076,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2079,7 +2152,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2136,7 +2209,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2339,6 +2412,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2553,8 +2653,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2577,6 +2676,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2688,7 +2796,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- int position_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 69a2a3dc4..77c02e734 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -3440,8 +3440,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&is_nan);
// Load canonical NaN for storing into the double array.
LoadRoot(at, Heap::kNanValueRootIndex);
- lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- lw(exponent_reg, FieldMemOperand(at, HeapNumber::kValueOffset + 4));
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
@@ -3986,7 +3986,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -4346,16 +4346,8 @@ void MacroAssembler::Check(Condition cc, BailoutReason reason,
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -4367,18 +4359,16 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4386,8 +4376,8 @@ void MacroAssembler::Abort(BailoutReason reason) {
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
@@ -4440,31 +4430,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function,
@@ -4477,19 +4442,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- lw(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- lw(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4865,6 +4817,23 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
ASSERT(!reg.is(at));
@@ -5482,9 +5451,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_trampoline_pool_blocked()) {
@@ -5754,6 +5723,28 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(at));
+ ASSERT(!result.is(at));
+ MultiplierAndShift ms(divisor);
+ li(at, Operand(ms.multiplier()));
+ Mult(dividend, Operand(at));
+ mfhi(result);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) sra(result, result, ms.shift());
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
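
TruncatingDiv is the classic multiply-by-magic-number division: the quotient comes from the high word of a 32x32->64 signed multiply, followed by the sign corrections and a final +1 for negative dividends. In portable C++ the emitted sequence computes roughly the following (a sketch; the multiplier/shift pair is assumed precomputed per divisor, as V8's MultiplierAndShift helper does, e.g. 0x55555556 with shift 0 for divisor 3):

    #include <cstdint>

    int32_t TruncatingDivByConst(int32_t dividend, int32_t divisor,
                                 int32_t multiplier, int shift) {
      // mult/mfhi: high 32 bits of the signed 64-bit product.
      int64_t product = static_cast<int64_t>(dividend) * multiplier;
      int32_t result = static_cast<int32_t>(product >> 32);
      if (divisor > 0 && multiplier < 0) result += dividend;
      if (divisor < 0 && multiplier > 0) result -= dividend;
      if (shift > 0) result >>= shift;  // sra
      // srl(dividend, 31) + Addu: round toward zero for negative dividends.
      result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
      return result;
    }
    // TruncatingDivByConst(-7, 3, 0x55555556, 0) == -2.
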
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 85347c9e5..db9f1a2c7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -871,14 +871,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1311,6 +1304,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
return code_object_;
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and at gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1435,6 +1432,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 10417d573..d26499bbc 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -925,6 +925,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -1926,7 +1930,11 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
case CLZ:
- alu_out = __builtin_clz(rs_u);
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ // GCC __builtin_clz: If input is 0, the result is undefined.
+ alu_out =
+ rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index d9fd10f24..92a0a87d2 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -203,6 +203,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index d1b428a34..153a81682 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -313,7 +313,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -342,61 +342,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi, t0);
-
- // Check that the object is a string.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ And(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ Branch(non_string_object,
- ne,
- scratch2,
- Operand(static_cast<int32_t>(kStringTag)));
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
-
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -467,11 +412,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -545,15 +490,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -571,15 +516,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ sw(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ sw(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -630,11 +575,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -676,7 +621,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -700,7 +645,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -770,13 +715,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
// Preparing to push, adjust sp.
__ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
@@ -843,7 +789,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -867,9 +813,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1064,15 +1007,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1246,24 +1180,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch3(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1271,20 +1187,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
// -- ra : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = a1;
- Register value = a0;
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1294,7 +1206,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ Push(receiver, value);
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1322,21 +1234,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1344,10 +1241,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1381,16 +1274,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return a0;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a1, a2, a0, a3, t0, t1 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a1, a2, a3, t0, t1 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { a2, a1, a3, t0, t1 };
return registers;
}
@@ -1524,6 +1422,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
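
The MIPS store-stub hunks above drop the FLAG_track_fields / FLAG_track_heap_object_fields / FLAG_track_double_fields guards, so the generated handlers now dispatch purely on the field's representation: smi stores skip the write barrier, heap-object stores keep it, and double stores go through a freshly allocated heap number. A rough standalone sketch of that dispatch (illustrative JavaScript with made-up names, not V8 code):

function storeGuardFor(representation) {
  // Mirrors the now-unconditional representation checks in the hunks above.
  switch (representation) {
    case 'smi':        return { check: 'JumpIfNotSmi',       writeBarrier: false };
    case 'heapobject': return { check: 'JumpIfSmi',          writeBarrier: true  };
    case 'double':     return { check: 'AllocateHeapNumber', writeBarrier: true  };
    default:           return { check: 'none',               writeBarrier: true  };
  }
}

// e.g. storeGuardFor('smi').writeBarrier === false, matching the
// "skip updating write barrier if storing a smi" paths in the diff.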
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 212bb0b9c..d413b090b 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -538,7 +538,7 @@ inherits(NumberMirror, ValueMirror);
NumberMirror.prototype.toText = function() {
- return %NumberToString(this.value_);
+ return %_NumberToString(this.value_);
};
@@ -889,9 +889,12 @@ FunctionMirror.prototype.script = function() {
// Return script if function is resolved. Otherwise just fall through
// to return undefined.
if (this.resolved()) {
+ if (this.script_) {
+ return this.script_;
+ }
var script = %FunctionGetScript(this.value_);
if (script) {
- return MakeMirror(script);
+ return this.script_ = MakeMirror(script);
}
}
};
@@ -917,9 +920,11 @@ FunctionMirror.prototype.sourcePosition_ = function() {
* @return {Location or undefined} in-script location for the function begin
*/
FunctionMirror.prototype.sourceLocation = function() {
- if (this.resolved() && this.script()) {
- return this.script().locationFromPosition(this.sourcePosition_(),
- true);
+ if (this.resolved()) {
+ var script = this.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition_(), true);
+ }
}
};
@@ -949,7 +954,10 @@ FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
FunctionMirror.prototype.scopeCount = function() {
if (this.resolved()) {
- return %GetFunctionScopeCount(this.value());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetFunctionScopeCount(this.value());
+ }
+ return this.scopeCount_;
} else {
return 0;
}
@@ -1506,7 +1514,10 @@ FrameDetails.prototype.returnValue = function() {
FrameDetails.prototype.scopeCount = function() {
- return %GetScopeCount(this.break_id_, this.frameId());
+ if (IS_UNDEFINED(this.scopeCount_)) {
+ this.scopeCount_ = %GetScopeCount(this.break_id_, this.frameId());
+ }
+ return this.scopeCount_;
};
@@ -1532,12 +1543,21 @@ function FrameMirror(break_id, index) {
inherits(FrameMirror, Mirror);
+FrameMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
FrameMirror.prototype.index = function() {
return this.index_;
};
FrameMirror.prototype.func = function() {
+ if (this.func_) {
+ return this.func_;
+ }
+
// Get the function for this frame from the VM.
var f = this.details_.func();
@@ -1545,7 +1565,7 @@ FrameMirror.prototype.func = function() {
// value returned from the VM might be a string if the function for the
// frame is unresolved.
if (IS_FUNCTION(f)) {
- return MakeMirror(f);
+ return this.func_ = MakeMirror(f);
} else {
return new UnresolvedFunctionMirror(f);
}
@@ -1628,39 +1648,36 @@ FrameMirror.prototype.sourcePosition = function() {
FrameMirror.prototype.sourceLocation = function() {
- if (this.func().resolved() && this.func().script()) {
- return this.func().script().locationFromPosition(this.sourcePosition(),
- true);
+ var func = this.func();
+ if (func.resolved()) {
+ var script = func.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition(), true);
+ }
}
};
FrameMirror.prototype.sourceLine = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.line;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.line;
}
};
FrameMirror.prototype.sourceColumn = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.column;
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.column;
}
};
FrameMirror.prototype.sourceLineText = function() {
- if (this.func().resolved()) {
- var location = this.sourceLocation();
- if (location) {
- return location.sourceText();
- }
+ var location = this.sourceLocation();
+ if (location) {
+ return location.sourceText();
}
};
@@ -1675,6 +1692,19 @@ FrameMirror.prototype.scope = function(index) {
};
+FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
+ var scopeDetails = %GetAllScopesDetails(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ !!opt_ignore_nested_scopes);
+ var result = [];
+ for (var i = 0; i < scopeDetails.length; ++i) {
+ result.push(new ScopeMirror(this, UNDEFINED, i, scopeDetails[i]));
+ }
+ return result;
+};
+
+
FrameMirror.prototype.stepInPositions = function() {
var script = this.func().script();
var funcOffset = this.func().sourcePosition_();
@@ -1793,9 +1823,10 @@ FrameMirror.prototype.sourceAndPositionText = function() {
var result = '';
var func = this.func();
if (func.resolved()) {
- if (func.script()) {
- if (func.script().name()) {
- result += func.script().name();
+ var script = func.script();
+ if (script) {
+ if (script.name()) {
+ result += script.name();
} else {
result += '[unnamed]';
}
@@ -1865,17 +1896,18 @@ FrameMirror.prototype.toText = function(opt_locals) {
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
-function ScopeDetails(frame, fun, index) {
+function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
this.break_id_ = frame.break_id_;
- this.details_ = %GetScopeDetails(frame.break_id_,
+ this.details_ = opt_details ||
+ %GetScopeDetails(frame.break_id_,
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
this.frame_id_ = frame.details_.frameId();
this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
- this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+ this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
@@ -1921,10 +1953,11 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @param {FrameMirror} frame The frame this scope is a part of
* @param {FunctionMirror} function The function this scope is a part of
* @param {number} index The scope index in the frame
+ * @param {Array=} opt_details Raw scope details data
* @constructor
* @extends Mirror
*/
-function ScopeMirror(frame, function, index) {
+function ScopeMirror(frame, function, index, opt_details) {
%_CallFunction(this, SCOPE_TYPE, Mirror);
if (frame) {
this.frame_index_ = frame.index_;
@@ -1932,11 +1965,16 @@ function ScopeMirror(frame, function, index) {
this.frame_index_ = undefined;
}
this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, function, index);
+ this.details_ = new ScopeDetails(frame, function, index, opt_details);
}
inherits(ScopeMirror, Mirror);
+ScopeMirror.prototype.details = function() {
+ return this.details_;
+};
+
+
ScopeMirror.prototype.frameIndex = function() {
return this.frame_index_;
};
@@ -2575,8 +2613,9 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.receiver = this.serializeReference(mirror.receiver());
var func = mirror.func();
content.func = this.serializeReference(func);
- if (func.script()) {
- content.script = this.serializeReference(func.script());
+ var script = func.script();
+ if (script) {
+ content.script = this.serializeReference(script);
}
content.constructCall = mirror.isConstructCall();
content.atReturn = mirror.isAtReturn();
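
The mirror-debugger.js changes above memoize expensive lookups -- FunctionMirror.script(), FrameMirror.func(), the scope counts -- on the mirror instance the first time they are computed. A minimal sketch of the same caching pattern in plain JavaScript (hypothetical names; the real code calls V8-internal %-runtime functions):

function FrameLike(lookup) {
  this.lookup_ = lookup;    // stands in for %GetFunctionScopeCount and friends
  this.cached_ = undefined;
}

FrameLike.prototype.value = function() {
  if (this.cached_ !== undefined) {
    return this.cached_;                 // hit: computed on an earlier call
  }
  return this.cached_ = this.lookup_();  // miss: compute once and remember
};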
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 499b27eca..e822f0bd4 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -56,40 +56,86 @@
// implementation of (1) and (2) have "optimized" states which represent
// common cases which can be handled more efficiently.
-var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.callbackInfoMap)) {
- observationState.callbackInfoMap = %ObservationWeakMapCreate();
- observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = null;
- observationState.nextCallbackPriority = 0;
-}
-
-function ObservationWeakMap(map) {
- this.map_ = map;
-}
-
-ObservationWeakMap.prototype = {
- get: function(key) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- return %WeakCollectionGet(this.map_, key);
- },
- set: function(key, value) {
- key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- %WeakCollectionSet(this.map_, key, value);
- },
- has: function(key) {
- return !IS_UNDEFINED(this.get(key));
+var observationState;
+
+function GetObservationState() {
+ if (IS_UNDEFINED(observationState))
+ observationState = %GetObservationState();
+
+ if (IS_UNDEFINED(observationState.callbackInfoMap)) {
+ observationState.callbackInfoMap = %ObservationWeakMapCreate();
+ observationState.objectInfoMap = %ObservationWeakMapCreate();
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
+ observationState.nextCallbackPriority = 0;
}
-};
-var callbackInfoMap =
- new ObservationWeakMap(observationState.callbackInfoMap);
-var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierObjectInfoMap =
- new ObservationWeakMap(observationState.notifierObjectInfoMap);
+ return observationState;
+}
+
+function GetWeakMapWrapper() {
+ function MapWrapper(map) {
+ this.map_ = map;
+ };
+
+ MapWrapper.prototype = {
+ get: function(key) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ return %WeakCollectionGet(this.map_, key);
+ },
+ set: function(key, value) {
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ %WeakCollectionSet(this.map_, key, value);
+ },
+ has: function(key) {
+ return !IS_UNDEFINED(this.get(key));
+ }
+ };
+
+ return MapWrapper;
+}
+
+var contextMaps;
+
+function GetContextMaps() {
+ if (IS_UNDEFINED(contextMaps)) {
+ var map = GetWeakMapWrapper();
+ var observationState = GetObservationState();
+ contextMaps = {
+ callbackInfoMap: new map(observationState.callbackInfoMap),
+ objectInfoMap: new map(observationState.objectInfoMap),
+ notifierObjectInfoMap: new map(observationState.notifierObjectInfoMap)
+ };
+ }
+
+ return contextMaps;
+}
+
+function GetCallbackInfoMap() {
+ return GetContextMaps().callbackInfoMap;
+}
+
+function GetObjectInfoMap() {
+ return GetContextMaps().objectInfoMap;
+}
+
+function GetNotifierObjectInfoMap() {
+ return GetContextMaps().notifierObjectInfoMap;
+}
+
+function GetPendingObservers() {
+ return GetObservationState().pendingObservers;
+}
+
+function SetPendingObservers(pendingObservers) {
+ GetObservationState().pendingObservers = pendingObservers;
+}
+
+function GetNextCallbackPriority() {
+ return GetObservationState().nextCallbackPriority++;
+}
function nullProtoObject() {
return { __proto__: null };
@@ -180,23 +226,23 @@ function ObjectInfoGetOrCreate(object) {
performing: null,
performingCount: 0,
};
- objectInfoMap.set(object, objectInfo);
+ GetObjectInfoMap().set(object, objectInfo);
}
return objectInfo;
}
function ObjectInfoGet(object) {
- return objectInfoMap.get(object);
+ return GetObjectInfoMap().get(object);
}
function ObjectInfoGetFromNotifier(notifier) {
- return notifierObjectInfoMap.get(notifier);
+ return GetNotifierObjectInfoMap().get(notifier);
}
function ObjectInfoGetNotifier(objectInfo) {
if (IS_NULL(objectInfo.notifier)) {
objectInfo.notifier = { __proto__: notifierPrototype };
- notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ GetNotifierObjectInfoMap().set(objectInfo.notifier, objectInfo);
}
return objectInfo.notifier;
@@ -302,16 +348,16 @@ function AcceptArgIsValid(arg) {
// priority. When a change record must be enqueued for the callback, it
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
- return callbackInfoMap.get(callback);
+ return GetCallbackInfoMap().get(callback);
}
function CallbackInfoGetOrCreate(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (!IS_UNDEFINED(callbackInfo))
return callbackInfo;
- var priority = observationState.nextCallbackPriority++
- callbackInfoMap.set(callback, priority);
+ var priority = GetNextCallbackPriority();
+ GetCallbackInfoMap().set(callback, priority);
return priority;
}
@@ -323,12 +369,12 @@ function CallbackInfoGetPriority(callbackInfo) {
}
function CallbackInfoNormalize(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_NUMBER(callbackInfo)) {
var priority = callbackInfo;
callbackInfo = new InternalArray;
callbackInfo.priority = priority;
- callbackInfoMap.set(callback, callbackInfo);
+ GetCallbackInfoMap().set(callback, callbackInfo);
}
return callbackInfo;
}
@@ -390,11 +436,13 @@ function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
}
var callbackInfo = CallbackInfoNormalize(callback);
- if (!observationState.pendingObservers)
- observationState.pendingObservers = nullProtoObject();
- observationState.pendingObservers[callbackInfo.priority] = callback;
+ if (IS_NULL(GetPendingObservers())) {
+ SetPendingObservers(nullProtoObject())
+ GetMicrotaskQueue().push(ObserveMicrotaskRunner);
+ %SetMicrotaskPending(true);
+ }
+ GetPendingObservers()[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
- %SetMicrotaskPending(true);
}
function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
@@ -546,17 +594,17 @@ function ObjectGetNotifier(object) {
}
function CallbackDeliverPending(callback) {
- var callbackInfo = callbackInfoMap.get(callback);
+ var callbackInfo = GetCallbackInfoMap().get(callback);
if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
return false;
// Clear the pending change records from callback and return it to its
// "optimized" state.
var priority = callbackInfo.priority;
- callbackInfoMap.set(callback, priority);
+ GetCallbackInfoMap().set(callback, priority);
- if (observationState.pendingObservers)
- delete observationState.pendingObservers[priority];
+ if (GetPendingObservers())
+ delete GetPendingObservers()[priority];
var delivered = [];
%MoveArrayContents(callbackInfo, delivered);
@@ -575,15 +623,14 @@ function ObjectDeliverChangeRecords(callback) {
}
function ObserveMicrotaskRunner() {
- var pendingObservers = observationState.pendingObservers;
+ var pendingObservers = GetPendingObservers();
if (pendingObservers) {
- observationState.pendingObservers = null;
+ SetPendingObservers(null);
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
}
}
-RunMicrotasks.runners.push(ObserveMicrotaskRunner);
function SetupObjectObserve() {
%CheckIsBootstrapping();
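
The object-observe.js rewrite above replaces eager module-level setup with lazy accessors: the observation state, its weak-map wrappers and the ObserveMicrotaskRunner registration are all created on first use instead of at bootstrap. A simplified sketch of that lazy-initialization pattern (standalone JavaScript, hypothetical names):

var state;  // counterpart of observationState: created on demand

function GetState() {
  if (state === undefined) {
    state = { pendingObservers: null, nextCallbackPriority: 0 };
  }
  return state;
}

function GetNextPriority() {
  return GetState().nextCallbackPriority++;   // cf. GetNextCallbackPriority()
}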
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e33b46be7..ca025e6cf 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -264,8 +264,9 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
bool JSObject::ElementsAreSafeToExamine() {
- return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
- reinterpret_cast<Map*>(elements()) !=
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ return reinterpret_cast<Map*>(elements()) !=
GetHeap()->one_pointer_filler_map();
}
@@ -274,7 +275,7 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
- if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+ if (GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS) {
CHECK(this->elements()->IsFixedArray());
CHECK_GE(this->elements()->length(), 2);
}
@@ -367,7 +368,7 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
- VerifyHeapPointer(type_feedback_cells());
+ VerifyHeapPointer(feedback_vector());
}
@@ -403,6 +404,13 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
void ConstantPoolArray::ConstantPoolArrayVerify() {
CHECK(IsConstantPoolArray());
+ for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+ Address code_entry = get_code_ptr_entry(first_code_ptr_index() + i);
+ VerifyPointer(Code::GetCodeFromTargetAddress(code_entry));
+ }
+ for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+ VerifyObjectField(OffsetOfElementAt(first_heap_ptr_index() + i));
+ }
}
@@ -490,7 +498,6 @@ void JSMessageObject::JSMessageObjectVerify() {
VerifyObjectField(kEndPositionOffset);
VerifyObjectField(kArgumentsOffset);
VerifyObjectField(kScriptOffset);
- VerifyObjectField(kStackTraceOffset);
VerifyObjectField(kStackFramesOffset);
}
@@ -636,7 +643,7 @@ void Code::VerifyEmbeddedObjectsDependency() {
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
Object* obj = it.rinfo()->target_object();
- if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (IsWeakObject(obj)) {
if (obj->IsMap()) {
Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(
@@ -767,7 +774,8 @@ void JSArrayBufferView::JSArrayBufferViewVerify() {
CHECK(IsJSArrayBufferView());
JSObjectVerify();
VerifyPointer(buffer());
- CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined()
+ || buffer() == Smi::FromInt(0));
VerifyPointer(byte_offset());
CHECK(byte_offset()->IsSmi() || byte_offset()->IsHeapNumber()
@@ -931,7 +939,6 @@ void Script::ScriptVerify() {
VerifyPointer(name());
line_offset()->SmiVerify();
column_offset()->SmiVerify();
- VerifyPointer(data());
VerifyPointer(wrapper());
type()->SmiVerify();
VerifyPointer(line_ends());
@@ -1054,7 +1061,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
break;
}
}
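
The ConstantPoolArray changes here and in objects-inl.h below split the old pointer section into separate code-pointer and heap-pointer sections, giving the layout [int64 | code ptr | heap ptr | int32]; the first_*_index markers are running offsets and the per-section counts fall out by subtraction. A small worked sketch of that arithmetic (illustrative JavaScript, not the C++ accessors):

// e.g. 2 int64, 1 code-pointer, 3 heap-pointer and 4 int32 entries
function setEntryCounts(c) {
  var firstCodePtr = c.int64;                    // 2
  var firstHeapPtr = firstCodePtr + c.codePtr;   // 3
  var firstInt32   = firstHeapPtr + c.heapPtr;   // 6
  var length       = firstInt32 + c.int32;       // 10
  return { firstCodePtr: firstCodePtr, firstHeapPtr: firstHeapPtr,
           firstInt32: firstInt32, length: length };
}

// count_of_heap_ptr_entries == firstInt32 - firstHeapPtr == 3,
// exactly the subtraction the new accessors perform.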
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 65c46f0af..9d550374e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -59,7 +59,7 @@ PropertyDetails::PropertyDetails(Smi* smi) {
}
-Smi* PropertyDetails::AsSmi() {
+Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
@@ -67,7 +67,7 @@ Smi* PropertyDetails::AsSmi() {
}
-PropertyDetails PropertyDetails::AsDeleted() {
+PropertyDetails PropertyDetails::AsDeleted() const {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
@@ -278,10 +278,9 @@ bool Object::HasValidElements() {
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
Representation representation) {
- if (FLAG_track_fields && representation.IsSmi() && IsUninitialized()) {
+ if (representation.IsSmi() && IsUninitialized()) {
return Smi::FromInt(0);
}
- if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
return heap->AllocateHeapNumber(0);
@@ -650,12 +649,6 @@ bool MaybeObject::IsRetryAfterGC() {
}
-bool MaybeObject::IsOutOfMemory() {
- return HAS_FAILURE_TAG(this)
- && Failure::cast(this)->IsOutOfMemoryException();
-}
-
-
bool MaybeObject::IsException() {
return this == Failure::Exception();
}
@@ -760,16 +753,6 @@ bool Object::IsDependentCode() {
}
-bool Object::IsTypeFeedbackCells() {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a cache cells array. Since this is used for asserts we can check that
- // the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
@@ -937,7 +920,8 @@ bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result || IsAccessCheckNeeded());
+ ASSERT(!result ||
+ HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
@@ -962,8 +946,14 @@ bool Object::IsUndetectableObject() {
bool Object::IsAccessCheckNeeded() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_access_check_needed();
+ if (!IsHeapObject()) return false;
+ if (IsJSGlobalProxy()) {
+ JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ GlobalObject* global =
+ proxy->GetIsolate()->context()->global_object();
+ return proxy->IsDetachedFrom(global);
+ }
+ return HeapObject::cast(this)->map()->is_access_check_needed();
}
@@ -1035,6 +1025,20 @@ bool Object::IsNaN() {
}
+Handle<Object> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
+ if (object->IsSmi()) return object;
+ if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ int int_value = FastD2I(value);
+ if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
+ return handle(Smi::FromInt(int_value), isolate);
+ }
+ }
+ return Handle<Object>();
+}
+
+
+// TODO(ishell): Use handlified version instead.
MaybeObject* Object::ToSmi() {
if (IsSmi()) return this;
if (IsHeapNumber()) {
@@ -1053,20 +1057,23 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
+Handle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
- return GetElementWithReceiver(isolate, this, index);
+ return Object::GetElementWithReceiver(isolate, object, object, index);
}
-Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
- ASSERT(!maybe->IsFailure());
- Object* result = NULL; // Initialization to please compiler.
- maybe->ToObject(&result);
+Handle<Object> Object::GetElementNoExceptionThrown(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ Handle<Object> result =
+ Object::GetElementWithReceiver(isolate, object, object, index);
+ CHECK_NOT_EMPTY_HANDLE(isolate, result);
return result;
}
@@ -1222,11 +1229,6 @@ bool Failure::IsInternalError() const {
}
-bool Failure::IsOutOfMemoryException() const {
- return type() == OUT_OF_MEMORY_EXCEPTION;
-}
-
-
AllocationSpace Failure::allocation_space() const {
ASSERT_EQ(RETRY_AFTER_GC, type());
return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
@@ -1244,11 +1246,6 @@ Failure* Failure::Exception() {
}
-Failure* Failure::OutOfMemoryException(intptr_t value) {
- return Construct(OUT_OF_MEMORY_EXCEPTION, value);
-}
-
-
intptr_t Failure::value() const {
return static_cast<intptr_t>(
reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
@@ -1396,6 +1393,11 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
}
+void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
+ v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
+}
+
+
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1474,7 +1476,8 @@ void AllocationSite::MarkZombie() {
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
- if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ if (FLAG_pretenuring_call_new ||
+ IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1484,8 +1487,9 @@ AllocationSiteMode AllocationSite::GetMode(
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
- if (IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to)) {
+ if (FLAG_pretenuring_call_new ||
+ (IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to))) {
return TRACK_ALLOCATION_SITE;
}
@@ -1564,9 +1568,7 @@ inline bool AllocationSite::DigestPretenuringFeedback() {
set_pretenure_decision(result);
if (current_mode != GetPretenureMode()) {
decision_changed = true;
- dependent_code()->MarkCodeForDeoptimization(
- GetIsolate(),
- DependentCode::kAllocationSiteTenuringChangedGroup);
+ set_deopt_dependent_code(true);
}
}
@@ -1598,73 +1600,79 @@ void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
}
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count,
- EnsureElementsMode mode) {
- ElementsKind current_kind = map()->elements_kind();
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Object** objects,
+ uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = object->map()->elements_kind();
ElementsKind target_kind = current_kind;
- ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
- bool is_holey = IsFastHoleyElementsKind(current_kind);
- if (current_kind == FAST_HOLEY_ELEMENTS) return this;
- Heap* heap = GetHeap();
- Object* the_hole = heap->the_hole_value();
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (current == the_hole) {
- is_holey = true;
- target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
- if (IsFastSmiElementsKind(target_kind)) {
- if (is_holey) {
- target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_DOUBLE_ELEMENTS;
+ {
+ DisallowHeapAllocation no_allocation;
+ ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ bool is_holey = IsFastHoleyElementsKind(current_kind);
+ if (current_kind == FAST_HOLEY_ELEMENTS) return;
+ Heap* heap = object->GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (current == the_hole) {
+ is_holey = true;
+ target_kind = GetHoleyElementsKind(target_kind);
+ } else if (!current->IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ if (IsFastSmiElementsKind(target_kind)) {
+ if (is_holey) {
+ target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ }
}
+ } else if (is_holey) {
+ target_kind = FAST_HOLEY_ELEMENTS;
+ break;
+ } else {
+ target_kind = FAST_ELEMENTS;
}
- } else if (is_holey) {
- target_kind = FAST_HOLEY_ELEMENTS;
- break;
- } else {
- target_kind = FAST_ELEMENTS;
}
}
}
-
if (target_kind != current_kind) {
- return TransitionElementsKind(target_kind);
+ TransitionElementsKind(object, target_kind);
}
- return this;
}
-MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
- uint32_t length,
- EnsureElementsMode mode) {
- if (elements->map() != GetHeap()->fixed_double_array_map()) {
- ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
- elements->map() == GetHeap()->fixed_cow_array_map());
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
+ uint32_t length,
+ EnsureElementsMode mode) {
+ Heap* heap = object->GetHeap();
+ if (elements->map() != heap->fixed_double_array_map()) {
+ ASSERT(elements->map() == heap->fixed_array_map() ||
+ elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
- Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
- return EnsureCanContainElements(objects, length, mode);
+ Object** objects =
+ Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
+ EnsureCanContainElements(object, objects, length, mode);
+ return;
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
- } else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
+ if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
- return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
+ return;
}
}
- return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
}
-
- return this;
}
@@ -1733,6 +1741,11 @@ void JSObject::initialize_elements() {
ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(map());
ASSERT(!GetHeap()->InNewSpace(empty_array));
WRITE_FIELD(this, kElementsOffset, empty_array);
+ } else if (map()->has_fixed_typed_array_elements()) {
+ FixedTypedArrayBase* empty_array =
+ GetHeap()->EmptyFixedTypedArrayForMap(map());
+ ASSERT(!GetHeap()->InNewSpace(empty_array));
+ WRITE_FIELD(this, kElementsOffset, empty_array);
} else {
UNREACHABLE();
}
@@ -1745,7 +1758,7 @@ MaybeObject* JSObject::ResetElements() {
SeededNumberDictionary* dictionary;
MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0);
if (!maybe->To(&dictionary)) return maybe;
- if (map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (map() == GetHeap()->sloppy_arguments_elements_map()) {
FixedArray::cast(elements())->set(1, dictionary);
} else {
set_elements(dictionary);
@@ -2088,11 +2101,11 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
}
-
void Object::VerifyApiCallResultType() {
#if ENABLE_EXTRA_CHECKS
if (!(IsSmi() ||
IsString() ||
+ IsSymbol() ||
IsSpecObject() ||
IsHeapNumber() ||
IsUndefined() ||
@@ -2182,6 +2195,15 @@ MaybeObject* FixedDoubleArray::get(int index) {
}
+Handle<Object> FixedDoubleArray::get_as_handle(int index) {
+ if (is_the_hole(index)) {
+ return GetIsolate()->factory()->the_hole_value();
+ } else {
+ return GetIsolate()->factory()->NewNumber(get_scalar(index));
+ }
+}
+
+
void FixedDoubleArray::set(int index, double value) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
@@ -2205,8 +2227,12 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
-SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
-SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_code_ptr_index, kFirstCodePointerIndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_heap_ptr_index, kFirstHeapPointerIndexOffset)
+SMI_ACCESSORS(
+ ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
int ConstantPoolArray::first_int64_index() {
@@ -2215,12 +2241,17 @@ int ConstantPoolArray::first_int64_index() {
int ConstantPoolArray::count_of_int64_entries() {
- return first_ptr_index();
+ return first_code_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_code_ptr_entries() {
+ return first_heap_ptr_index() - first_code_ptr_index();
}
-int ConstantPoolArray::count_of_ptr_entries() {
- return first_int32_index() - first_ptr_index();
+int ConstantPoolArray::count_of_heap_ptr_entries() {
+ return first_int32_index() - first_heap_ptr_index();
}
@@ -2230,32 +2261,44 @@ int ConstantPoolArray::count_of_int32_entries() {
void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
- set_first_ptr_index(number_of_int64_entries);
- set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
- set_length(number_of_int64_entries + number_of_ptr_entries +
- number_of_int32_entries);
+ int current_index = number_of_int64_entries;
+ set_first_code_ptr_index(current_index);
+ current_index += number_of_code_ptr_entries;
+ set_first_heap_ptr_index(current_index);
+ current_index += number_of_heap_ptr_entries;
+ set_first_int32_index(current_index);
+ current_index += number_of_int32_entries;
+ set_length(current_index);
}
int64_t ConstantPoolArray::get_int64_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(index >= 0 && index < first_code_ptr_index());
return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}
double ConstantPoolArray::get_int64_entry_as_double(int index) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_ptr_index());
+ ASSERT(index >= 0 && index < first_code_ptr_index());
return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}
-Object* ConstantPoolArray::get_ptr_entry(int index) {
+Address ConstantPoolArray::get_code_ptr_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
+}
+
+
+Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ ASSERT(index >= first_heap_ptr_index() && index < first_int32_index());
return READ_FIELD(this, OffsetOfElementAt(index));
}
@@ -2267,9 +2310,16 @@ int32_t ConstantPoolArray::get_int32_entry(int index) {
}
+void ConstantPoolArray::set(int index, Address value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
+}
+
+
void ConstantPoolArray::set(int index, Object* value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ ASSERT(index >= first_code_ptr_index() && index < first_int32_index());
WRITE_FIELD(this, OffsetOfElementAt(index), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
}
@@ -2277,7 +2327,7 @@ void ConstantPoolArray::set(int index, Object* value) {
void ConstantPoolArray::set(int index, int64_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
}
@@ -2285,7 +2335,7 @@ void ConstantPoolArray::set(int index, int64_t value) {
void ConstantPoolArray::set(int index, double value) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
}
@@ -2719,7 +2769,8 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
DescriptorArray::WhitenessWitness::WhitenessWitness(FixedArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
+ ASSERT(!marking_->IsMarking() ||
+ Marking::Color(array) == Marking::WHITE_OBJECT);
}
@@ -2797,7 +2848,6 @@ CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
@@ -3645,35 +3695,64 @@ void ExternalFloat64Array::set(int index, double value) {
}
-int FixedTypedArrayBase::size() {
+void* FixedTypedArrayBase::DataPtr() {
+ return FIELD_ADDR(this, kDataOffset);
+}
+
+
+int FixedTypedArrayBase::DataSize() {
InstanceType instance_type = map()->instance_type();
int element_size;
switch (instance_type) {
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- element_size = 1;
- break;
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- element_size = 2;
- break;
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- element_size = 4;
- break;
- case FIXED_FLOAT64_ARRAY_TYPE:
- element_size = 8;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = size; \
break;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
return 0;
}
- return OBJECT_POINTER_ALIGN(kDataOffset + length() * element_size);
+ return length() * element_size;
}
+int FixedTypedArrayBase::size() {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
+}
+
+
+uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
+
+
+uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
+
+
+int8_t Int8ArrayTraits::defaultValue() { return 0; }
+
+
+uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
+
+
+int16_t Int16ArrayTraits::defaultValue() { return 0; }
+
+
+uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
+
+
+int32_t Int32ArrayTraits::defaultValue() { return 0; }
+
+
+float Float32ArrayTraits::defaultValue() {
+ return static_cast<float>(OS::nan_value());
+}
+
+
+double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+
+
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
ASSERT((index >= 0) && (index < this->length()));
@@ -3709,6 +3788,47 @@ void FixedTypedArray<Float64ArrayTraits>::set(
template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_int(int value) {
+ return static_cast<ElementType>(value);
+}
+
+
+template <> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_int(int value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from_double(
+ double value) {
+ return static_cast<ElementType>(DoubleToInt32(value));
+}
+
+
+template<> inline
+uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_double(double value) {
+ if (value < 0) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+
+template<> inline
+float FixedTypedArray<Float32ArrayTraits>::from_double(double value) {
+ return static_cast<float>(value);
+}
+
+
+template<> inline
+double FixedTypedArray<Float64ArrayTraits>::from_double(double value) {
+ return value;
+}
+
+
+template <class Traits>
MaybeObject* FixedTypedArray<Traits>::get(int index) {
return Traits::ToObject(GetHeap(), get_scalar(index));
}
@@ -3719,10 +3839,10 @@ MaybeObject* FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
if (index < static_cast<uint32_t>(length())) {
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
- cast_value = static_cast<ElementType>(int_value);
+ cast_value = from_int(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<ElementType>(DoubleToInt32(double_value));
+ cast_value = from_double(double_value);
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
@@ -3854,7 +3974,8 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
return ConstantPoolArray::SizeFor(
reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_code_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_heap_ptr_entries(),
reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
}
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
@@ -3998,8 +4119,7 @@ void Map::set_is_shared(bool value) {
bool Map::is_shared() {
- return IsShared::decode(bit_field3());
-}
+ return IsShared::decode(bit_field3()); }
void Map::set_dictionary_map(bool value) {
@@ -4045,7 +4165,6 @@ void Map::deprecate() {
bool Map::is_deprecated() {
- if (!FLAG_track_fields) return false;
return Deprecated::decode(bit_field3());
}
@@ -4056,7 +4175,6 @@ void Map::set_migration_target(bool value) {
bool Map::is_migration_target() {
- if (!FLAG_track_fields) return false;
return IsMigrationTarget::decode(bit_field3());
}
@@ -4090,22 +4208,11 @@ bool Map::CanBeDeprecated() {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
PropertyDetails details = instance_descriptors()->GetDetails(i);
- if (FLAG_track_fields && details.representation().IsNone()) {
- return true;
- }
- if (FLAG_track_fields && details.representation().IsSmi()) {
- return true;
- }
- if (FLAG_track_double_fields && details.representation().IsDouble()) {
- return true;
- }
- if (FLAG_track_heap_object_fields &&
- details.representation().IsHeapObject()) {
- return true;
- }
- if (FLAG_track_fields && details.type() == CONSTANT) {
- return true;
- }
+ if (details.representation().IsNone()) return true;
+ if (details.representation().IsSmi()) return true;
+ if (details.representation().IsDouble()) return true;
+ if (details.representation().IsHeapObject()) return true;
+ if (details.type() == CONSTANT) return true;
}
return false;
}
@@ -4211,16 +4318,8 @@ InlineCacheState Code::ic_state() {
ExtraICState Code::extra_ic_state() {
- ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
- || ic_state() == DEBUG_STUB);
- return ExtractExtraICStateFromFlags(flags());
-}
-
-
-ExtraICState Code::extended_extra_ic_state() {
ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
- ASSERT(needs_extended_extra_ic_state(kind()));
- return ExtractExtendedExtraICStateFromFlags(flags());
+ return ExtractExtraICStateFromFlags(flags());
}
@@ -4229,12 +4328,6 @@ Code::StubType Code::type() {
}
-int Code::arguments_count() {
- ASSERT(kind() == STUB || is_handler());
- return ExtractArgumentsCountFromFlags(flags());
-}
-
-
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4438,7 +4531,7 @@ void Code::set_back_edges_patched_for_osr(bool value) {
byte Code::to_boolean_state() {
- return extended_extra_ic_state();
+ return extra_ic_state();
}
@@ -4509,18 +4602,13 @@ Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
- int argc,
InlineCacheHolderFlag holder) {
- ASSERT(argc <= Code::kMaxArguments);
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
| TypeField::encode(type)
- | ExtendedExtraICStateField::encode(extra_ic_state)
+ | ExtraICStateField::encode(extra_ic_state)
| CacheHolderField::encode(holder);
- if (!Code::needs_extended_extra_ic_state(kind)) {
- bits |= (argc << kArgumentsCountShift);
- }
return static_cast<Flags>(bits);
}
@@ -4528,9 +4616,15 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
- StubType type,
- int argc) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
+ StubType type) {
+ return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
+}
+
+
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
+ StubType type,
+ InlineCacheHolderFlag holder) {
+ return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
}
@@ -4549,22 +4643,11 @@ ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
}
-ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
- Flags flags) {
- return ExtendedExtraICStateField::decode(flags);
-}
-
-
Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
return TypeField::decode(flags);
}
-int Code::ExtractArgumentsCountFromFlags(Flags flags) {
- return (flags & kArgumentsCountMask) >> kArgumentsCountShift;
-}
-
-
InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
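
The Code::Flags hunks above fold the separate extended-extra-IC-state field back into a single ExtraICStateField and drop the per-flag argument count, so extraction is a plain field decode again. A toy sketch of that style of bit-field packing (illustrative JavaScript; the field widths are invented, not V8's layout):

var KIND_BITS = 4, STATE_BITS = 3;

function computeFlags(kind, icState, extraICState) {
  return kind | (icState << KIND_BITS) | (extraICState << (KIND_BITS + STATE_BITS));
}

function extractExtraICState(flags) {
  return flags >>> (KIND_BITS + STATE_BITS);   // no argc bits left to mask out
}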
@@ -4593,6 +4676,39 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
}
+bool Code::IsWeakObjectInOptimizedCode(Object* object) {
+ ASSERT(is_optimized_code());
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+ if (object->IsJSObject() ||
+ (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+ return false;
+}
+
+
+class Code::FindAndReplacePattern {
+ public:
+ FindAndReplacePattern() : count_(0) { }
+ void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
+ ASSERT(count_ < kMaxCount);
+ find_[count_] = map_to_find;
+ replace_[count_] = obj_to_replace;
+ ++count_;
+ }
+ private:
+ static const int kMaxCount = 4;
+ int count_;
+ Handle<Map> find_[kMaxCount];
+ Handle<Object> replace_[kMaxCount];
+ friend class Code;
+};
+
+
Object* Map::prototype() {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -4938,7 +5054,6 @@ ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Smi, kIdOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
ACCESSORS_TO_SMI(Script, type, kTypeOffset)
@@ -5147,39 +5262,21 @@ int SharedFunctionInfo::profiler_ticks() {
}
-LanguageMode SharedFunctionInfo::language_mode() {
- int hints = compiler_hints();
- if (BooleanBit::get(hints, kExtendedModeFunction)) {
- ASSERT(BooleanBit::get(hints, kStrictModeFunction));
- return EXTENDED_MODE;
- }
- return BooleanBit::get(hints, kStrictModeFunction)
- ? STRICT_MODE : CLASSIC_MODE;
+StrictMode SharedFunctionInfo::strict_mode() {
+ return BooleanBit::get(compiler_hints(), kStrictModeFunction)
+ ? STRICT : SLOPPY;
}
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- // We only allow language mode transitions that go set the same language mode
- // again or go up in the chain:
- // CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
- ASSERT(this->language_mode() == CLASSIC_MODE ||
- this->language_mode() == language_mode ||
- language_mode == EXTENDED_MODE);
+void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
+ // We only allow mode transitions from sloppy to strict.
+ ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
int hints = compiler_hints();
- hints = BooleanBit::set(
- hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
- hints = BooleanBit::set(
- hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
+ hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
set_compiler_hints(hints);
}
-bool SharedFunctionInfo::is_classic_mode() {
- return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
- kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
@@ -5450,8 +5547,8 @@ void JSFunction::ReplaceCode(Code* code) {
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
if (was_optimized && is_optimized) {
- shared()->EvictFromOptimizedCodeMap(
- this->code(), "Replacing with another optimized code");
+ shared()->EvictFromOptimizedCodeMap(this->code(),
+ "Replacing with another optimized code");
}
set_code(code);
@@ -5686,7 +5783,6 @@ JSDate* JSDate::cast(Object* obj) {
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_trace, Object, kStackTraceOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
@@ -5705,12 +5801,14 @@ ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
+ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ WRITE_FIELD(this, kConstantPoolOffset, NULL);
// Do not wipe out e.g. a minor key.
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
@@ -5732,20 +5830,6 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
}
-Object* Code::next_code_link() {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- return raw_type_feedback_info();
-}
-
-
-void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
- CHECK(kind() == OPTIMIZED_FUNCTION);
- set_raw_type_feedback_info(value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
-
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
@@ -5932,7 +6016,7 @@ ElementsKind JSObject::GetElementsKind() {
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
#endif
@@ -5980,8 +6064,8 @@ bool JSObject::HasDictionaryElements() {
}
-bool JSObject::HasNonStrictArgumentsElements() {
- return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+bool JSObject::HasSloppyArgumentsElements() {
+ return GetElementsKind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
@@ -6013,6 +6097,20 @@ bool JSObject::HasFixedTypedArrayElements() {
}
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+bool JSObject::HasFixed##Type##Elements() { \
+ HeapObject* array = elements(); \
+ ASSERT(array != NULL); \
+ if (!array->IsHeapObject()) \
+ return false; \
+ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+}
+
+TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
+
+#undef FIXED_TYPED_ELEMENTS_CHECK
+
+
bool JSObject::HasNamedInterceptor() {
return map()->has_named_interceptor();
}
@@ -6196,7 +6294,7 @@ bool JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetPropertyAttribute(*name) != ABSENT;
+ return GetPropertyAttribute(object, name) != ABSENT;
}
@@ -6206,25 +6304,28 @@ bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return object->GetLocalPropertyAttribute(*name) != ABSENT;
+ return GetLocalPropertyAttribute(object, name) != ABSENT;
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(Name* key) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(Handle<JSReceiver> object,
+ Handle<Name> key) {
uint32_t index;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(index);
+ if (object->IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(object, index);
}
- return GetPropertyAttributeWithReceiver(this, key);
+ return GetPropertyAttributeWithReceiver(object, object, key);
}
-PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object,
+ uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true);
}
@@ -6257,8 +6358,8 @@ bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, true) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true) != ABSENT;
}
@@ -6267,17 +6368,19 @@ bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
- *object, index, false) != ABSENT;
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false) != ABSENT;
}
-PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
+PropertyAttributes JSReceiver::GetLocalElementAttribute(
+ Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(object), object, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false);
}
@@ -6504,20 +6607,20 @@ void Map::ClearCodeCache(Heap* heap) {
}
-void JSArray::EnsureSize(int required_size) {
- ASSERT(HasFastSmiOrObjectElements());
- FixedArray* elts = FixedArray::cast(elements());
+void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
+ ASSERT(array->HasFastSmiOrObjectElements());
+ Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid
// constantly growing.
- Expand(required_size + (required_size >> 3));
+ Expand(array, required_size + (required_size >> 3));
// It's a performance benefit to keep a frequently used array in new-space.
- } else if (!GetHeap()->new_space()->Contains(elts) &&
+ } else if (!array->GetHeap()->new_space()->Contains(*elts) &&
required_size < kArraySizeThatFitsComfortablyInNewSpace) {
// Expand will allocate a new backing store in new space even if the size
// we asked for isn't larger than what we had before.
- Expand(required_size);
+ Expand(array, required_size);
}
}
@@ -6535,19 +6638,19 @@ bool JSArray::AllowsSetElementsLength() {
}
-MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
- MaybeObject* maybe_result = EnsureCanContainElements(
- storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
- if (maybe_result->IsFailure()) return maybe_result;
- ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
- IsFastDoubleElementsKind(GetElementsKind())) ||
- ((storage->map() != GetHeap()->fixed_double_array_map()) &&
- (IsFastObjectElementsKind(GetElementsKind()) ||
- (IsFastSmiElementsKind(GetElementsKind()) &&
- FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
- set_elements(storage);
- set_length(Smi::FromInt(storage->length()));
- return this;
+void JSArray::SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage) {
+ EnsureCanContainElements(array, storage, storage->length(),
+ ALLOW_COPIED_DOUBLE_ELEMENTS);
+
+ ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+ IsFastDoubleElementsKind(array->GetElementsKind())) ||
+ ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
+ (IsFastObjectElementsKind(array->GetElementsKind()) ||
+ (IsFastSmiElementsKind(array->GetElementsKind()) &&
+ Handle<FixedArray>::cast(storage)->ContainsOnlySmisOrHoles()))));
+ array->set_elements(*storage);
+ array->set_length(Smi::FromInt(storage->length()));
}
@@ -6569,44 +6672,24 @@ MaybeObject* ConstantPoolArray::Copy() {
}
-void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
- set(1 + index * 2, Smi::FromInt(id.ToInt()));
-}
-
-
-TypeFeedbackId TypeFeedbackCells::AstId(int index) {
- return TypeFeedbackId(Smi::cast(get(1 + index * 2))->value());
-}
-
-
-void TypeFeedbackCells::SetCell(int index, Cell* cell) {
- set(index * 2, cell);
-}
-
-
-Cell* TypeFeedbackCells::GetCell(int index) {
- return Cell::cast(get(index * 2));
-}
-
-
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->the_hole_value();
+Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
+ return isolate->factory()->uninitialized_symbol();
}
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->undefined_value();
+Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
+ return isolate->factory()->megamorphic_symbol();
}
-Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
ElementsKind elements_kind) {
return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
}
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->the_hole_value();
+Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
+ return heap->uninitialized_symbol();
}
@@ -6688,8 +6771,8 @@ bool TypeFeedbackInfo::matches_inlined_type_change_checksum(int checksum) {
}
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
- kTypeFeedbackCellsOffset)
+ACCESSORS(TypeFeedbackInfo, feedback_vector, FixedArray,
+ kFeedbackVectorOffset)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
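
The hunks above retire the TypeFeedbackCells side table: the sentinel helpers move onto TypeFeedbackInfo, use dedicated uninitialized/megamorphic symbols instead of the-hole/undefined, and the feedback storage becomes a flat FixedArray (feedback_vector). A minimal standalone sketch of that slot model, with toy types standing in for V8's (the mono-to-megamorphic policy shown here is illustrative, not taken from this diff):

    // Illustrative sketch only -- not the real V8 types. A feedback vector is a
    // flat array of slots whose "never executed" and "saturated" states are
    // marked with dedicated sentinel values.
    #include <cstdio>
    #include <string>
    #include <variant>
    #include <vector>

    struct Uninitialized {};   // plays the role of uninitialized_symbol()
    struct Megamorphic {};     // plays the role of megamorphic_symbol()
    using Slot = std::variant<Uninitialized, Megamorphic, std::string /* one observed map */>;

    struct FeedbackVector {
      std::vector<Slot> slots;

      void Record(size_t i, const std::string& map) {
        Slot& s = slots[i];
        if (std::holds_alternative<Uninitialized>(s)) {
          s = map;                 // first observation: go monomorphic
        } else if (auto* seen = std::get_if<std::string>(&s); seen && *seen != map) {
          s = Megamorphic{};       // a second distinct map saturates the slot
        }
      }
    };

    int main() {
      FeedbackVector v{{Uninitialized{}, Uninitialized{}}};
      v.Record(0, "MapA");
      v.Record(0, "MapA");         // still monomorphic
      v.Record(1, "MapA");
      v.Record(1, "MapB");         // now megamorphic
      for (const Slot& s : v.slots) {
        std::printf("%s\n", std::holds_alternative<Uninitialized>(s) ? "uninitialized"
                            : std::holds_alternative<Megamorphic>(s) ? "megamorphic"
                            : std::get<std::string>(s).c_str());
      }
    }

Keeping distinct sentinel values for "never executed" and "saturated" lets a consumer tell the two states apart without consulting a separate cell array.
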
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 909d8f742..518167cc5 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -378,7 +378,7 @@ void JSObject::PrintElements(FILE* out) {
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
PrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
@@ -400,28 +400,39 @@ void JSObject::PrintTransitions(FILE* out) {
if (!map()->HasTransitionArray()) return;
TransitionArray* transitions = map()->transitions();
for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ Name* key = transitions->GetKey(i);
PrintF(out, " ");
- transitions->GetKey(i)->NamePrint(out);
+ key->NamePrint(out);
PrintF(out, ": ");
- switch (transitions->GetTargetDetails(i).type()) {
- case FIELD: {
- PrintF(out, " (transition to field)\n");
- break;
+ if (key == GetHeap()->frozen_symbol()) {
+ PrintF(out, " (transition to frozen)\n");
+ } else if (key == GetHeap()->elements_transition_symbol()) {
+ PrintF(out, " (transition to ");
+ PrintElementsKind(out, transitions->GetTarget(i)->elements_kind());
+ PrintF(out, ")\n");
+ } else if (key == GetHeap()->observed_symbol()) {
+ PrintF(out, " (transition to Object.observe)\n");
+ } else {
+ switch (transitions->GetTargetDetails(i).type()) {
+ case FIELD: {
+ PrintF(out, " (transition to field)\n");
+ break;
+ }
+ case CONSTANT:
+ PrintF(out, " (transition to constant)\n");
+ break;
+ case CALLBACKS:
+ PrintF(out, " (transition to callback)\n");
+ break;
+ // Values below are never in the target descriptor array.
+ case NORMAL:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
}
- case CONSTANT:
- PrintF(out, " (transition to constant)\n");
- break;
- case CALLBACKS:
- PrintF(out, " (transition to callback)\n");
- break;
- // Values below are never in the target descriptor array.
- case NORMAL:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
}
}
}
@@ -555,8 +566,8 @@ void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
ic_total_count(), ic_with_type_info_count());
- PrintF(out, " - type_feedback_cells: ");
- type_feedback_cells()->FixedArrayPrint(out);
+ PrintF(out, " - feedback_vector: ");
+ feedback_vector()->FixedArrayPrint(out);
}
@@ -595,11 +606,14 @@ void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "ConstantPoolArray");
PrintF(out, " - length: %d", length());
for (int i = 0; i < length(); i++) {
- if (i < first_ptr_index()) {
+ if (i < first_code_ptr_index()) {
PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i < first_heap_ptr_index()) {
+ PrintF(out, "\n [%d]: code target pointer: %p", i,
+ reinterpret_cast<void*>(get_code_ptr_entry(i)));
} else if (i < first_int32_index()) {
- PrintF(out, "\n [%d]: pointer: %p", i,
- reinterpret_cast<void*>(get_ptr_entry(i)));
+ PrintF(out, "\n [%d]: heap pointer: %p", i,
+ reinterpret_cast<void*>(get_heap_ptr_entry(i)));
} else {
PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
}
@@ -624,8 +638,6 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
PrintF(out, "\n - end_position: %d", end_position());
PrintF(out, "\n - script: ");
script()->ShortPrint(out);
- PrintF(out, "\n - stack_trace: ");
- stack_trace()->ShortPrint(out);
PrintF(out, "\n - stack_frames: ");
stack_frames()->ShortPrint(out);
PrintF(out, "\n");
@@ -1138,8 +1150,6 @@ void Script::ScriptPrint(FILE* out) {
type()->ShortPrint(out);
PrintF(out, "\n - id: ");
id()->ShortPrint(out);
- PrintF(out, "\n - data: ");
- data()->ShortPrint(out);
PrintF(out, "\n - context data: ");
context_data()->ShortPrint(out);
PrintF(out, "\n - wrapper: ");
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 5201a7b31..31117bb94 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -270,7 +270,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
// TODO(ulan): It could be better to record slots only for strongly embedded
// objects here and record slots for weakly embedded object during clearing
// of non-live references in mark-compact.
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
+ if (!rinfo->host()->IsWeakObject(object)) {
StaticVisitor::MarkObject(heap, object);
}
}
@@ -282,7 +282,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(
ASSERT(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
// No need to record slots because the cell space is not compacted during GC.
- if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), cell)) {
+ if (!rinfo->host()->IsWeakObject(cell)) {
StaticVisitor::MarkObject(heap, cell);
}
}
@@ -313,7 +313,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(target->GetIsolate(), rinfo->pc());
+ IC::Clear(target->GetIsolate(), rinfo->pc(),
+ rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -427,7 +428,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_cleanup_code_caches_at_gc) {
- code->ClearTypeFeedbackCells(heap);
+ code->ClearTypeFeedbackInfo(heap);
}
if (FLAG_age_code && !Serializer::enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
@@ -489,16 +490,16 @@ void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
- if (constant_pool->count_of_ptr_entries() > 0) {
- int first_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index());
- int last_ptr_offset = constant_pool->OffsetOfElementAt(
- constant_pool->first_ptr_index() +
- constant_pool->count_of_ptr_entries() - 1);
- StaticVisitor::VisitPointers(
- heap,
- HeapObject::RawField(object, first_ptr_offset),
- HeapObject::RawField(object, last_ptr_offset));
+ for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+ int index = constant_pool->first_code_ptr_index() + i;
+ Address code_entry =
+ reinterpret_cast<Address>(constant_pool->RawFieldOfElementAt(index));
+ StaticVisitor::VisitCodeEntry(heap, code_entry);
+ }
+ for (int i = 0; i < constant_pool->count_of_heap_ptr_entries(); i++) {
+ int index = constant_pool->first_heap_ptr_index() + i;
+ StaticVisitor::VisitPointer(heap,
+ constant_pool->RawFieldOfElementAt(index));
}
}
@@ -898,6 +899,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
IteratePointer(v, kTypeFeedbackInfoOffset);
+ IterateNextCodeLink(v, kNextCodeLinkOffset);
IteratePointer(v, kConstantPoolOffset);
RelocIterator it(this, mode_mask);
@@ -932,6 +934,9 @@ void Code::CodeIterateBody(Heap* heap) {
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+ StaticVisitor::VisitNextCodeLink(
+ heap,
+ reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 41e5fd6fd..de8ca6d05 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -414,6 +414,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
+ // Skip the weak next code link in a code object.
+ INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimize code for functions inlined into the given optimized
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 15c12db4e..45220ee29 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -80,6 +80,8 @@ MaybeObject* Object::ToObject(Context* native_context) {
return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
return CreateJSValue(native_context->string_function(), this);
+ } else if (IsSymbol()) {
+ return CreateJSValue(native_context->symbol_function(), this);
}
ASSERT(IsJSObject());
return this;
@@ -491,19 +493,11 @@ Handle<Object> Object::GetProperty(Handle<Object> object,
// method (or somewhere else entirely). Needs more global clean-up.
uint32_t index;
Isolate* isolate = name->GetIsolate();
- if (name->AsArrayIndex(&index))
- return GetElement(isolate, object, index);
+ if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
}
-Handle<Object> Object::GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
-}
-
-
MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
uint32_t index) {
String* name;
@@ -517,7 +511,7 @@ Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
return SetPropertyWithHandler(
@@ -613,29 +607,29 @@ Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_GET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
bool continue_search) {
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
- Object* obj = result->GetCallbackObject();
+ Handle<Object> obj(result->GetCallbackObject(), object->GetIsolate());
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(obj);
if (info->all_can_read()) {
return result->GetAttributes();
}
} else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(obj);
if (pair->all_can_read()) {
return result->GetAttributes();
}
@@ -648,13 +642,11 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case CONSTANT: {
if (!continue_search) break;
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ LookupResult r(object->GetIsolate());
+ result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
if (r.IsProperty()) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
+ return GetPropertyAttributeWithFailedAccessCheck(
+ object, &r, name, continue_search);
}
break;
}
@@ -662,17 +654,15 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
+ LookupResult r(object->GetIsolate());
if (continue_search) {
- result->holder()->LookupRealNamedProperty(name, &r);
+ result->holder()->LookupRealNamedProperty(*name, &r);
} else {
- result->holder()->LocalLookupRealNamedProperty(name, &r);
+ result->holder()->LocalLookupRealNamedProperty(*name, &r);
}
if (!r.IsFound()) break;
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- &r,
- name,
- continue_search);
+ return GetPropertyAttributeWithFailedAccessCheck(
+ object, &r, name, continue_search);
}
case HANDLER:
@@ -682,12 +672,12 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
}
}
- GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ object->GetIsolate()->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return ABSENT;
}
-Object* JSObject::GetNormalizedProperty(LookupResult* result) {
+Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
ASSERT(!HasFastProperties());
Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
if (IsGlobalObject()) {
@@ -699,7 +689,7 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
void JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value) {
ASSERT(!object->HasFastProperties());
NameDictionary* property_dictionary = object->property_dictionary();
@@ -732,7 +722,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<NameDictionary> property_dictionary(object->property_dictionary());
if (!name->IsUniqueName()) {
- name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ name = object->GetIsolate()->factory()->InternalizeString(
Handle<String>::cast(name));
}
@@ -972,63 +962,70 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
-MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index) {
- Heap* heap = isolate->heap();
- Object* holder = this;
+Handle<Object> Object::GetElementWithReceiver(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Handle<Object> holder;
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
- for (holder = this;
- holder != heap->null_value();
- holder = holder->GetPrototype(isolate)) {
+ for (holder = object;
+ !holder->IsNull();
+ holder = Handle<Object>(holder->GetPrototype(isolate), isolate)) {
if (!holder->IsJSObject()) {
Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
- holder = native_context->number_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->number_function()->instance_prototype(), isolate);
} else if (holder->IsString()) {
- holder = native_context->string_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->string_function()->instance_prototype(), isolate);
} else if (holder->IsSymbol()) {
- holder = native_context->symbol_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->symbol_function()->instance_prototype(), isolate);
} else if (holder->IsBoolean()) {
- holder = native_context->boolean_function()->instance_prototype();
+ holder = Handle<Object>(
+ native_context->boolean_function()->instance_prototype(), isolate);
} else if (holder->IsJSProxy()) {
- return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+ CALL_HEAP_FUNCTION(isolate,
+ Handle<JSProxy>::cast(holder)->GetElementWithHandler(
+ *receiver, index),
+ Object);
} else {
// Undefined and null have no indexed properties.
ASSERT(holder->IsUndefined() || holder->IsNull());
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
}
// Inline the case for JSObjects. Doing so significantly improves the
// performance of fetching elements where checking the prototype chain is
// necessary.
- JSObject* js_object = JSObject::cast(holder);
+ Handle<JSObject> js_object = Handle<JSObject>::cast(holder);
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
- Isolate* isolate = heap->isolate();
- if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ if (!isolate->MayIndexedAccessWrapper(js_object, index, v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheckWrapper(js_object, v8::ACCESS_GET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
}
if (js_object->HasIndexedInterceptor()) {
- return js_object->GetElementWithInterceptor(receiver, index);
+ return JSObject::GetElementWithInterceptor(js_object, receiver, index);
}
- if (js_object->elements() != heap->empty_fixed_array()) {
- MaybeObject* result = js_object->GetElementsAccessor()->Get(
+ if (js_object->elements() != isolate->heap()->empty_fixed_array()) {
+ Handle<Object> result = js_object->GetElementsAccessor()->Get(
receiver, js_object, index);
- if (result != heap->the_hole_value()) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ if (!result->IsTheHole()) return result;
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
@@ -1278,14 +1275,13 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// - the space the existing string occupies is too small for a regular
// external string.
// - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with fields
- // containing an unaligned address that points to outside of V8's heap.
+ // the external string is not aligned. The GC cannot deal with a field
+ // containing a possibly unaligned address to outside of V8's heap.
// In either case we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
if (size < ExternalString::kSize ||
- (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
- heap->old_pointer_space()->Contains(this))) {
+ heap->old_pointer_space()->Contains(this)) {
this->set_map_no_write_barrier(
is_internalized
? (is_ascii
@@ -1312,10 +1308,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
@@ -1349,14 +1342,13 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// - the space the existing string occupies is too small for a regular
// external string.
// - the existing string is in old pointer space and the backing store of
- // the external string is not aligned. The GC cannot deal with fields
- // containing an unaligned address that points to outside of V8's heap.
+ // the external string is not aligned. The GC cannot deal with a field
+ // containing a possibly unaligned address to outside of V8's heap.
// In either case we resort to a short external string instead, omitting
// the field caching the address of the backing store. When we encounter
// short external strings in generated code, we need to bailout to runtime.
if (size < ExternalString::kSize ||
- (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
- heap->old_pointer_space()->Contains(this))) {
+ heap->old_pointer_space()->Contains(this)) {
this->set_map_no_write_barrier(
is_internalized ? heap->short_external_ascii_internalized_string_map()
: heap->short_external_ascii_string_map());
@@ -1372,10 +1364,7 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
- new_size - size);
- }
+ heap->AdjustLiveBytes(this->address(), new_size - size, Heap::FROM_MUTATOR);
return true;
}
@@ -1543,17 +1532,18 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
void JSObject::PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements) {
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements) {
if (from_kind != to_kind) {
PrintF(file, "elements transition [");
PrintElementsKind(file, from_kind);
PrintF(file, " -> ");
PrintElementsKind(file, to_kind);
PrintF(file, "] in ");
- JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
PrintF(file, " for ");
- ShortPrint(file);
+ object->ShortPrint(file);
PrintF(file, " from ");
from_elements->ShortPrint(file);
PrintF(file, " to ");
@@ -1574,7 +1564,12 @@ void Map::PrintGeneralization(FILE* file,
PrintF(file, "[generalizing ");
constructor_name()->PrintOn(file);
PrintF(file, "] ");
- String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
+ Name* name = instance_descriptors()->GetKey(modify_index);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
+ }
if (constant_to_field) {
PrintF(file, ":c->f");
} else {
@@ -1614,7 +1609,7 @@ void JSObject::PrintInstanceMigration(FILE* file,
if (name->IsString()) {
String::cast(name)->PrintOn(file);
} else {
- PrintF(file, "???");
+ PrintF(file, "{symbol %p}", static_cast<void*>(name));
}
PrintF(file, " ");
}
@@ -1970,31 +1965,6 @@ static Handle<Object> NewStorageFor(Isolate* isolate,
}
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation) {
- Isolate* isolate = object->GetIsolate();
-
- // This method is used to transition to a field. If we are transitioning to a
- // double field, allocate new storage.
- Handle<Object> storage = NewStorageFor(isolate, value, representation);
-
- if (object->map()->unused_property_fields() == 0) {
- int new_unused = new_map->unused_property_fields();
- Handle<FixedArray> properties(object->properties());
- Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
- properties, properties->length() + new_unused + 1);
- object->set_properties(*values);
- }
-
- object->set_map(*new_map);
- object->FastPropertyAtPut(field_index, *storage);
-}
-
-
static MaybeObject* CopyAddFieldDescriptor(Map* map,
Name* name,
int index,
@@ -2059,7 +2029,16 @@ void JSObject::AddFastProperty(Handle<JSObject> object,
Handle<Map> new_map = CopyAddFieldDescriptor(
handle(object->map()), name, index, attributes, representation, flag);
- AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
+ JSObject::MigrateToMap(object, new_map);
+
+ if (representation.IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return;
+ HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(index));
+ box->set_value(value->Number());
+ } else {
+ object->FastPropertyAtPut(index, *value);
+ }
}
@@ -2103,7 +2082,7 @@ void JSObject::AddConstantProperty(Handle<JSObject> object,
Handle<Map> new_map = CopyAddConstantDescriptor(
handle(object->map()), name, constant, attributes, flag);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
}
@@ -2142,7 +2121,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
JSReceiver::StoreFromKeyed store_mode,
ExtensibilityCheck extensibility_check,
ValueType value_type,
@@ -2152,13 +2131,13 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!name->IsUniqueName()) {
- name = isolate->factory()->InternalizedStringFromString(
+ name = isolate->factory()->InternalizeString(
Handle<String>::cast(name));
}
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return value;
} else {
Handle<Object> args[1] = { name };
@@ -2192,8 +2171,7 @@ Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
AddSlowProperty(object, name, value, attributes);
}
- if (FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ if (object->map()->is_observed() &&
*name != isolate->heap()->hidden_string()) {
Handle<Object> old_value = isolate->factory()->the_hole_value();
EnqueueChangeRecord(object, "add", name, old_value);
@@ -2231,7 +2209,7 @@ Handle<Object> JSObject::SetPropertyPostInterceptor(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// Check local property, ignore interceptor.
LookupResult result(object->GetIsolate());
object->LocalLookupRealNamedProperty(*name, &result);
@@ -2286,9 +2264,6 @@ const char* Representation::Mnemonic() const {
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
static void ZapEndOfFixedArray(Address new_end, int to_trim) {
// If we are doing a big trim in old space then we zap the space.
Object** zap = reinterpret_cast<Object**>(new_end);
@@ -2299,7 +2274,7 @@ static void ZapEndOfFixedArray(Address new_end, int to_trim) {
}
-template<RightTrimMode trim_mode>
+template<Heap::InvocationMode mode>
static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
@@ -2311,7 +2286,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) {
ZapEndOfFixedArray(new_end, to_trim);
}
@@ -2324,14 +2299,7 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
elms->set_length(len - to_trim);
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
+ heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
// The array may not be moved during GC,
// and size has to be adjusted nevertheless.
@@ -2351,16 +2319,14 @@ bool Map::InstancesNeedRewriting(Map* target,
ASSERT(target_number_of_fields >= number_of_fields);
if (target_number_of_fields != number_of_fields) return true;
- if (FLAG_track_double_fields) {
- // If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray* old_desc = instance_descriptors();
- DescriptorArray* new_desc = target->instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() &&
- !old_desc->GetDetails(i).representation().IsDouble()) {
- return true;
- }
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ !old_desc->GetDetails(i).representation().IsDouble()) {
+ return true;
}
}
@@ -2416,9 +2382,14 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
- int descriptors = new_map->NumberOfOwnDescriptors();
+ int old_nof = old_map->NumberOfOwnDescriptors();
+ int new_nof = new_map->NumberOfOwnDescriptors();
+
+ // This method only supports generalizing instances to at least the same
+ // number of properties.
+ ASSERT(old_nof <= new_nof);
- for (int i = 0; i < descriptors; i++) {
+ for (int i = 0; i < old_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
@@ -2432,22 +2403,30 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
? old_descriptors->GetValue(i)
: object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
Handle<Object> value(raw_value, isolate);
- if (FLAG_track_double_fields &&
- !old_details.representation().IsDouble() &&
+ if (!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
if (old_details.representation().IsNone()) {
value = handle(Smi::FromInt(0), isolate);
}
value = NewStorageFor(isolate, value, details.representation());
}
- ASSERT(!(FLAG_track_double_fields &&
- details.representation().IsDouble() &&
- value->IsSmi()));
+ ASSERT(!(details.representation().IsDouble() && value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
array->set(target_index, *value);
}
+ for (int i = old_nof; i < new_nof; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ if (details.representation().IsDouble()) {
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ Handle<Object> box = isolate->factory()->NewHeapNumber(0);
+ array->set(target_index, *box);
+ }
+ }
+
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -2468,7 +2447,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(isolate->heap(), *array, inobject);
object->set_properties(*array);
}
@@ -2545,7 +2524,6 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
void Map::DeprecateTransitionTree() {
- if (!FLAG_track_fields) return;
if (is_deprecated()) return;
if (HasTransitionArray()) {
TransitionArray* transitions = this->transitions();
@@ -2577,6 +2555,7 @@ void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
DescriptorArray* to_replace = instance_descriptors();
Map* current = this;
+ GetHeap()->incremental_marking()->RecordWrites(to_replace);
while (current->instance_descriptors() == to_replace) {
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->set_instance_descriptors(new_descriptors);
@@ -2625,6 +2604,8 @@ Map* Map::FindUpdatedMap(int verbatim,
current->instance_descriptors()->GetValue(i)) {
return NULL;
}
+ } else if (target_details.type() == CALLBACKS) {
+ return NULL;
}
}
@@ -2846,7 +2827,7 @@ Handle<Object> JSObject::SetPropertyWithInterceptor(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
Isolate* isolate = object->GetIsolate();
@@ -2878,7 +2859,7 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
LookupResult result(object->GetIsolate());
object->LocalLookup(*name, &result, true);
@@ -2895,7 +2876,7 @@ Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -2954,9 +2935,7 @@ Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[2] = { name, holder };
Handle<Object> error =
isolate->factory()->NewTypeError("no_setter_in_callback",
@@ -3007,7 +2986,7 @@ Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate *isolate = object->GetIsolate();
for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
!proto->IsNull();
@@ -3047,7 +3026,7 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done) {
Isolate* isolate = object->GetIsolate();
@@ -3065,14 +3044,12 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
*done = result.IsReadOnly();
break;
case INTERCEPTOR: {
- PropertyAttributes attr =
- result.holder()->GetPropertyAttributeWithInterceptor(
- *object, *name, true);
+ PropertyAttributes attr = GetPropertyAttributeWithInterceptor(
+ handle(result.holder()), object, name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
- if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
Handle<Object> callback_object(result.GetCallbackObject(), isolate);
return SetPropertyWithCallback(object, callback_object, name, value,
@@ -3091,9 +3068,8 @@ Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
}
// If we get here with *done true, we have encountered a read-only property.
- if (!FLAG_es5_readonly) *done = false;
if (*done) {
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -3135,7 +3111,7 @@ static int AppendUniqueCallbacks(NeanderArray* callbacks,
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
if (entry->name()->IsUniqueName()) continue;
Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
+ isolate->factory()->InternalizeString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -3257,24 +3233,31 @@ Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* current_map = map;
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+ int target_kind =
+ IsFastElementsKind(to_kind) || IsExternalArrayElementsKind(to_kind)
+ ? to_kind
+ : TERMINAL_FAST_ELEMENTS_KIND;
- ASSERT(index <= to_index);
+ // Support for legacy API.
+ if (IsExternalArrayElementsKind(to_kind) &&
+ !IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ return map;
+ }
- for (; index < to_index; ++index) {
+ ElementsKind kind = map->elements_kind();
+ while (kind != target_kind) {
+ kind = GetNextTransitionElementsKind(kind);
if (!current_map->HasElementsTransition()) return current_map;
current_map = current_map->elements_transition_map();
}
- if (!IsFastElementsKind(to_kind) && current_map->HasElementsTransition()) {
+
+ if (to_kind != kind && current_map->HasElementsTransition()) {
+ ASSERT(to_kind == DICTIONARY_ELEMENTS);
Map* next_map = current_map->elements_transition_map();
if (next_map->elements_kind() == to_kind) return next_map;
}
- ASSERT(IsFastElementsKind(to_kind)
- ? current_map->elements_kind() == to_kind
- : current_map->elements_kind() == TERMINAL_FAST_ELEMENTS_KIND);
+
+ ASSERT(current_map->elements_kind() == target_kind);
return current_map;
}
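
FindClosestElementsTransition above drops the fast-kind sequence indices and instead steps through GetNextTransitionElementsKind until it reaches the requested kind or a map with no further elements transition. The same loop in miniature, over a made-up three-kind lattice (toy types, not V8's kind ordering):

    #include <cstdio>

    enum ElementsKind { FAST_SMI, FAST_OBJECT, DICTIONARY };

    ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
      return static_cast<ElementsKind>(kind + 1);   // next, more general kind
    }

    struct ToyMap {
      ElementsKind kind;
      ToyMap* elements_transition = nullptr;   // next map in the chain, if allocated
    };

    ToyMap* FindClosestElementsTransition(ToyMap* map, ElementsKind to_kind) {
      ToyMap* current = map;
      ElementsKind kind = map->kind;
      while (kind != to_kind) {
        kind = GetNextTransitionElementsKind(kind);
        if (current->elements_transition == nullptr) return current;  // chain ends early
        current = current->elements_transition;
      }
      return current;
    }

    int main() {
      ToyMap dict{DICTIONARY};
      ToyMap fast_obj{FAST_OBJECT, &dict};
      ToyMap fast_smi{FAST_SMI, &fast_obj};
      std::printf("reached kind %d asking for %d\n",
                  FindClosestElementsTransition(&fast_smi, DICTIONARY)->kind, DICTIONARY);
      ToyMap lone{FAST_SMI};
      std::printf("reached kind %d asking for %d\n",
                  FindClosestElementsTransition(&lone, DICTIONARY)->kind, DICTIONARY);
    }
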
@@ -3302,26 +3285,21 @@ bool Map::IsMapInArrayPrototypeChain() {
static MaybeObject* AddMissingElementsTransitions(Map* map,
ElementsKind to_kind) {
- ASSERT(IsFastElementsKind(map->elements_kind()));
- int index = GetSequenceIndexFromFastElementsKind(map->elements_kind());
- int to_index = IsFastElementsKind(to_kind)
- ? GetSequenceIndexFromFastElementsKind(to_kind)
- : GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
-
- ASSERT(index <= to_index);
+ ASSERT(IsTransitionElementsKind(map->elements_kind()));
Map* current_map = map;
- for (; index < to_index; ++index) {
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(index + 1);
+ ElementsKind kind = map->elements_kind();
+ while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+ kind = GetNextTransitionElementsKind(kind);
MaybeObject* maybe_next_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ current_map->CopyAsElementsKind(kind, INSERT_TRANSITION);
if (!maybe_next_map->To(&current_map)) return maybe_next_map;
}
// In case we are exiting the fast elements kind system, just add the map in
// the end.
- if (!IsFastElementsKind(to_kind)) {
+ if (kind != to_kind) {
MaybeObject* maybe_next_map =
current_map->CopyAsElementsKind(to_kind, INSERT_TRANSITION);
if (!maybe_next_map->To(&current_map)) return maybe_next_map;
@@ -3353,7 +3331,7 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
// Only remember the map transition if there is not an already existing
// non-matching element transition.
!start_map->IsUndefined() && !start_map->is_shared() &&
- IsFastElementsKind(from_kind);
+ IsTransitionElementsKind(from_kind);
// Only store fast element maps in ascending generality.
if (IsFastElementsKind(to_kind)) {
@@ -3370,6 +3348,15 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
+// TODO(ishell): Temporary wrapper until handlified.
+// static
+Handle<Map> Map::AsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->AsElementsKind(kind),
+ Map);
+}
+
+
MaybeObject* Map::AsElementsKind(ElementsKind kind) {
Map* closest_map = FindClosestElementsTransition(this, kind);
@@ -3382,6 +3369,7 @@ MaybeObject* Map::AsElementsKind(ElementsKind kind) {
void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) {
+ DisallowHeapAllocation no_gc;
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
@@ -3461,7 +3449,7 @@ Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
Handle<Name> name,
Handle<Object> value,
bool check_prototype,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
if (check_prototype && !result->IsProperty()) {
object->LookupRealNamedPropertyInPrototypes(*name, result);
}
@@ -3517,7 +3505,7 @@ Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
}
Isolate* isolate = object->GetIsolate();
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -3528,7 +3516,7 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
if (result->IsHandler()) {
return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
@@ -3560,7 +3548,7 @@ Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -3580,7 +3568,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done) {
Isolate* isolate = proxy->GetIsolate();
Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
@@ -3648,7 +3636,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
if (!*done) return isolate->factory()->the_hole_value();
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -3667,7 +3655,7 @@ Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return value;
+ if (strict_mode == SLOPPY) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
@@ -3711,21 +3699,18 @@ Handle<Object> JSProxy::DeleteElementWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw) {
- Isolate* isolate = GetIsolate();
+PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return ABSENT;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return NONE;
@@ -3760,6 +3745,7 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
if (configurable->IsFalse()) {
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
@@ -3777,15 +3763,13 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
}
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver_raw,
+PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
+ return GetPropertyAttributeWithHandler(proxy, receiver, name);
}
@@ -3861,16 +3845,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
}
map = MapAsElementsKind(map, to_kind);
}
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != object->properties()->length()) {
- Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
- handle(object->properties()), out_of_object);
- object->set_properties(*new_properties);
- }
- object->set_map(*map);
+ JSObject::MigrateToMap(object, map);
}
@@ -3917,7 +3892,7 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
return JSObject::AddProperty(
- object, name, value, attributes, kNonStrictMode,
+ object, name, value, attributes, SLOPPY,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3926,29 +3901,31 @@ Handle<Object> JSObject::SetPropertyUsingTransition(
// Keep the target CONSTANT if the same value is stored.
// TODO(verwaest): Also support keeping the placeholder
// (value->IsUninitialized) as constant.
- if (details.type() == CONSTANT &&
- descriptors->GetValue(descriptor) == *value) {
- object->set_map(*transition_map);
- return value;
- }
-
- Representation representation = details.representation();
-
- if (!value->FitsRepresentation(representation) ||
- details.type() == CONSTANT) {
+ if (!value->FitsRepresentation(details.representation()) ||
+ (details.type() == CONSTANT &&
+ descriptors->GetValue(descriptor) != *value)) {
transition_map = Map::GeneralizeRepresentation(transition_map,
descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MigrateToMap(object, handle(Map::cast(back)));
- }
- descriptors = transition_map->instance_descriptors();
- representation = descriptors->GetDetails(descriptor).representation();
}
+ JSObject::MigrateToMap(object, transition_map);
+
+ // Reload.
+ descriptors = transition_map->instance_descriptors();
+ details = descriptors->GetDetails(descriptor);
+
+ if (details.type() != FIELD) return value;
+
int field_index = descriptors->GetFieldIndex(descriptor);
- AddFastPropertyUsingMap(
- object, transition_map, name, value, field_index, representation);
+ if (details.representation().IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return value;
+ HeapNumber* box = HeapNumber::cast(object->RawFastPropertyAt(field_index));
+ box->set_value(value->Number());
+ } else {
+ object->FastPropertyAtPut(field_index, *value);
+ }
+
return value;
}
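
Together with the MigrateToMap change earlier in this file, the hunk above standardizes how double fields are written: migration allocates a HeapNumber box for the field, and a subsequent store mutates that box in place rather than swapping in a new object. A compact sketch of the boxing pattern (toy types; V8's representation machinery is not modeled):

    #include <cstdio>
    #include <memory>
    #include <variant>
    #include <vector>

    struct HeapNumber { double value; };
    using Field = std::variant<int, std::shared_ptr<HeapNumber>>;  // tagged smi or boxed double

    struct ToyObject {
      std::vector<Field> fields;

      // Migration to a map with a double field pre-allocates the box (cf. MigrateToMap).
      void GeneralizeFieldToDouble(size_t i) {
        if (auto* smi = std::get_if<int>(&fields[i])) {
          fields[i] = std::make_shared<HeapNumber>(HeapNumber{static_cast<double>(*smi)});
        }
      }

      // Storing into a double field mutates the box in place
      // (cf. SetPropertyUsingTransition above).
      void StoreDouble(size_t i, double v) {
        std::get<std::shared_ptr<HeapNumber>>(fields[i])->value = v;
      }
    };

    int main() {
      ToyObject o{{Field{41}}};
      o.GeneralizeFieldToDouble(0);
      o.StoreDouble(0, 41.5);
      std::printf("field 0 = %g\n", std::get<std::shared_ptr<HeapNumber>>(o.fields[0])->value);
    }
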
@@ -3968,7 +3945,7 @@ static void SetPropertyToField(LookupResult* lookup,
representation = desc->GetDetails(descriptor).representation();
}
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
lookup->GetFieldIndex().field_index()));
storage->set_value(value->Number());
@@ -4029,7 +4006,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode) {
Isolate* isolate = object->GetIsolate();
@@ -4047,7 +4024,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
true, strict_mode);
}
@@ -4078,7 +4055,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
Handle<Object> args[] = { name, object };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
@@ -4090,11 +4067,11 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup->IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
// This is a real property that is not read-only, or it is a
@@ -4140,6 +4117,7 @@ Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
Handle<Object> new_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, new_value);
if (!new_value->SameValue(*old_value)) {
EnqueueChangeRecord(object, "update", name, old_value);
}
@@ -4182,9 +4160,9 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
- false, kNonStrictMode);
+ false, SLOPPY);
}
}
@@ -4207,18 +4185,19 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
TransitionFlag flag = lookup.IsFound()
? OMIT_TRANSITION : INSERT_TRANSITION;
// Neither properties nor transitions found.
- return AddProperty(object, name, value, attributes, kNonStrictMode,
+ return AddProperty(object, name, value, attributes, SLOPPY,
MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag);
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(object, name);
+ if (lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
+ }
old_attributes = lookup.GetAttributes();
}
@@ -4263,6 +4242,7 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
Handle<Object> new_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, new_value);
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
@@ -4279,20 +4259,22 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult result(isolate);
+ object->LocalLookupRealNamedProperty(*name, &result);
if (result.IsFound()) return result.GetAttributes();
if (continue_search) {
// Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- if (!pt->IsNull()) {
- return JSObject::cast(pt)->
- GetPropertyAttributeWithReceiver(receiver, name);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (!proto->IsNull()) {
+ return JSReceiver::GetPropertyAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, name);
}
}
return ABSENT;
@@ -4300,31 +4282,30 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor(
PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
- JSObject* receiver,
- Name* name,
- bool continue_search) {
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return ABSENT;
- Isolate* isolate = GetIsolate();
+ Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(String::cast(name));
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *object);
if (!interceptor->query()->IsUndefined()) {
v8::NamedPropertyQueryCallback query =
v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
+ ApiNamedPropertyAccess("interceptor-named-has", *object, *name));
v8::Handle<v8::Integer> result =
- args.Call(query, v8::Utils::ToLocal(name_handle));
+ args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name)));
if (!result.IsEmpty()) {
ASSERT(result->IsInt32());
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4333,44 +4314,45 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
v8::NamedPropertyGetterCallback getter =
v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
+ ApiNamedPropertyAccess("interceptor-named-get-has", *object, *name));
v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_handle));
+ args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name)));
if (!result.IsEmpty()) return DONT_ENUM;
}
- return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
- *name_handle,
- continue_search);
+ return GetPropertyAttributePostInterceptor(
+ object, receiver, name, continue_search);
}
PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
- JSReceiver* receiver,
- Name* key) {
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ Handle<Name> key) {
uint32_t index = 0;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (object->IsJSObject() && key->AsArrayIndex(&index)) {
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), receiver, index, true);
}
// Named property.
- LookupResult lookup(GetIsolate());
- Lookup(key, &lookup);
- return GetPropertyAttributeForResult(receiver, &lookup, key, true);
+ LookupResult lookup(object->GetIsolate());
+ object->Lookup(*key, &lookup);
+ return GetPropertyAttributeForResult(object, receiver, &lookup, key, true);
}
PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
- JSReceiver* receiver,
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
LookupResult* lookup,
- Name* name,
+ Handle<Name> name,
bool continue_search) {
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- JSObject* this_obj = JSObject::cast(this);
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
- return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, lookup, name, continue_search);
+ if (object->IsAccessCheckNeeded()) {
+ Heap* heap = object->GetHeap();
+ Handle<JSObject> obj = Handle<JSObject>::cast(object);
+ if (!heap->isolate()->MayNamedAccessWrapper(obj, name, v8::ACCESS_HAS)) {
+ return JSObject::GetPropertyAttributeWithFailedAccessCheck(
+ obj, lookup, name, continue_search);
}
}
if (lookup->IsFound()) {
@@ -4381,12 +4363,15 @@ PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
case CALLBACKS:
return lookup->GetAttributes();
case HANDLER: {
- return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
- receiver, name);
+ return JSProxy::GetPropertyAttributeWithHandler(
+ handle(lookup->proxy()), receiver, name);
}
case INTERCEPTOR:
- return lookup->holder()->GetPropertyAttributeWithInterceptor(
- JSObject::cast(receiver), name, continue_search);
+ return JSObject::GetPropertyAttributeWithInterceptor(
+ handle(lookup->holder()),
+ Handle<JSObject>::cast(receiver),
+ name,
+ continue_search);
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
@@ -4396,67 +4381,74 @@ PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
}
-PropertyAttributes JSReceiver::GetLocalPropertyAttribute(Name* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(
+ Handle<JSReceiver> object, Handle<Name> name) {
// Check whether the name is an array index.
uint32_t index = 0;
- if (IsJSObject() && name->AsArrayIndex(&index)) {
- return GetLocalElementAttribute(index);
+ if (object->IsJSObject() && name->AsArrayIndex(&index)) {
+ return GetLocalElementAttribute(object, index);
}
// Named property.
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return GetPropertyAttributeForResult(this, &lookup, name, false);
+ LookupResult lookup(object->GetIsolate());
+ object->LocalLookup(*name, &lookup, true);
+ return GetPropertyAttributeForResult(object, object, &lookup, name, false);
}
PropertyAttributes JSObject::GetElementAttributeWithReceiver(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return ABSENT;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return ABSENT;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetElementAttributeWithReceiver(
- receiver, index, continue_search);
+ return JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, continue_search);
}
// Check for lookup interceptor except when bootstrapping.
- if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return GetElementAttributeWithInterceptor(receiver, index, continue_search);
+ if (object->HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
+ return JSObject::GetElementAttributeWithInterceptor(
+ object, receiver, index, continue_search);
}
return GetElementAttributeWithoutInterceptor(
- receiver, index, continue_search);
+ object, receiver, index, continue_search);
}
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *receiver, *object);
if (!interceptor->query()->IsUndefined()) {
v8::IndexedPropertyQueryCallback query =
v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
@@ -4464,37 +4456,42 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
+ ApiIndexedPropertyAccess(
+ "interceptor-indexed-get-has", *object, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
if (!result.IsEmpty()) return NONE;
}
- return holder->GetElementAttributeWithoutInterceptor(
- *hreceiver, index, continue_search);
+ return GetElementAttributeWithoutInterceptor(
+ object, receiver, index, continue_search);
}
PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
- receiver, this, index);
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ bool continue_search) {
+ PropertyAttributes attr = object->GetElementsAccessor()->GetAttributes(
+ *receiver, *object, index);
if (attr != ABSENT) return attr;
// Handle [] on String objects.
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
}
if (!continue_search) return ABSENT;
- Object* pt = GetPrototype();
- if (pt->IsJSProxy()) {
+ Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
+ if (proto->IsJSProxy()) {
// We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
+ return JSProxy::GetElementAttributeWithHandler(
+ Handle<JSProxy>::cast(proto), receiver, index);
}
- if (pt->IsNull()) return ABSENT;
- return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ if (proto->IsNull()) return ABSENT;
+ return GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(proto), receiver, index, true);
}
@@ -4640,12 +4637,12 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
- MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
- -instance_size_delta);
- }
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ heap->AdjustLiveBytes(object->address(),
+ -instance_size_delta,
+ Heap::FROM_MUTATOR);
object->set_map(*new_map);
map->NotifyLeafMapLayoutChange();
@@ -4674,119 +4671,92 @@ void JSObject::TransformToFastProperties(Handle<JSObject> object,
}
-static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
- Isolate* isolate,
- FixedArrayBase* array,
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
int length,
- SeededNumberDictionary* dictionary) {
- Heap* heap = isolate->heap();
+ Handle<SeededNumberDictionary> dictionary) {
+ Isolate* isolate = array->GetIsolate();
+ Factory* factory = isolate->factory();
bool has_double_elements = array->IsFixedDoubleArray();
for (int i = 0; i < length; i++) {
- Object* value = NULL;
+ Handle<Object> value;
if (has_double_elements) {
- FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(array);
if (double_array->is_the_hole(i)) {
- value = isolate->heap()->the_hole_value();
+ value = factory->the_hole_value();
} else {
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to convert the FixedDoubleArray.
- MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(double_array->get_scalar(i), TENURED);
- if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
+ value = factory->NewHeapNumber(double_array->get_scalar(i));
}
} else {
- value = FixedArray::cast(array)->get(i);
+ value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
}
if (!value->IsTheHole()) {
PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
- MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, value, details);
- if (!maybe_result->To(&dictionary)) return maybe_result;
+ dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
}
}
return dictionary;
}
-static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
- Handle<FixedArrayBase> array,
- int length,
- Handle<SeededNumberDictionary> dict) {
- Isolate* isolate = array->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- CopyFastElementsToDictionary(
- isolate, *array, length, *dict),
- SeededNumberDictionary);
-}
-
-
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->NormalizeElements(),
- SeededNumberDictionary);
-}
-
-
-MaybeObject* JSObject::NormalizeElements() {
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements() &&
+ !object->HasFixedTypedArrayElements());
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
// Find the backing store.
- FixedArrayBase* array = FixedArrayBase::cast(elements());
- Map* old_map = array->map();
+ Handle<FixedArrayBase> array(FixedArrayBase::cast(object->elements()));
bool is_arguments =
- (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
+ (array->map() == isolate->heap()->sloppy_arguments_elements_map());
if (is_arguments) {
- array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
+ array = handle(FixedArrayBase::cast(
+ Handle<FixedArray>::cast(array)->get(1)));
}
- if (array->IsDictionary()) return array;
+ if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array);
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastDoubleElements() ||
- HasFastArgumentsElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements() ||
+ object->HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
: array->length();
int old_capacity = 0;
int used_elements = 0;
- GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- SeededNumberDictionary::Allocate(GetHeap(), used_elements);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ Handle<SeededNumberDictionary> dictionary =
+ factory->NewSeededNumberDictionary(used_elements);
- maybe_dictionary = CopyFastElementsToDictionary(
- GetIsolate(), array, length, dictionary);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = CopyFastElementsToDictionary(array, length, dictionary);
// Switch to using the dictionary as the backing storage for elements.
if (is_arguments) {
- FixedArray::cast(elements())->set(1, dictionary);
+ FixedArray::cast(object->elements())->set(1, *dictionary);
} else {
    // Set the new map first to satisfy the elements type assert in
// set_elements().
- Map* new_map;
- MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
- DICTIONARY_ELEMENTS);
- if (!maybe->To(&new_map)) return maybe;
- set_map(new_map);
- set_elements(dictionary);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
+
+ JSObject::MigrateToMap(object, new_map);
+ object->set_elements(*dictionary);
}
- old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
+ isolate->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
return dictionary;
}
@@ -4952,10 +4922,10 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
}
-bool JSObject::HasHiddenProperties() {
- return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_string(),
- false) != ABSENT;
+bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
+ Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
+ return GetPropertyAttributePostInterceptor(
+ object, object, hidden, false) != ABSENT;
}
@@ -5036,7 +5006,7 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
// We can store the identity hash inline iff there is no backing store
// for hidden properties yet.
- ASSERT(object->HasHiddenProperties() != value->IsSmi());
+ ASSERT(JSObject::HasHiddenProperties(object) != value->IsSmi());
if (object->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
@@ -5115,18 +5085,6 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<Object> AccessorDelete(Handle<JSObject> object,
- uint32_t index,
- JSObject::DeleteMode mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->GetElementsAccessor()->Delete(*object,
- index,
- mode),
- Object);
-}
-
-
Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
uint32_t index) {
Isolate* isolate = object->GetIsolate();
@@ -5153,7 +5111,8 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
// Rebox CustomArguments::kReturnValueOffset before returning.
return handle(*result_internal, isolate);
}
- Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ Handle<Object> delete_result = object->GetElementsAccessor()->Delete(
+ object, index, NORMAL_DELETION);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return delete_result;
}
@@ -5167,8 +5126,8 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ !isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->false_value();
}
@@ -5196,12 +5155,14 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && object->map()->is_observed()) {
+ if (object->map()->is_observed()) {
should_enqueue_change_record = HasLocalElement(object, index);
if (should_enqueue_change_record) {
- old_value = object->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(factory->the_hole_value())
- : Object::GetElement(isolate, object, index);
+ if (object->GetLocalElementAccessorPair(index) != NULL) {
+ old_value = Handle<Object>::cast(factory->the_hole_value());
+ } else {
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
}
}
@@ -5210,7 +5171,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
result = DeleteElementWithInterceptor(object, index);
} else {
- result = AccessorDelete(object, index, mode);
+ result = object->GetElementsAccessor()->Delete(object, index, mode);
}
if (should_enqueue_change_record && !HasLocalElement(object, index)) {
@@ -5231,8 +5192,8 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_DELETE);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5267,11 +5228,11 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
Handle<Object> result;
@@ -5390,7 +5351,7 @@ bool JSObject::ReferencesObject(Object* obj) {
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
// Check the mapped parameters.
int length = parameter_map->length();
@@ -5412,7 +5373,7 @@ bool JSObject::ReferencesObject(Object* obj) {
// Get the constructor function for arguments array.
JSObject* arguments_boilerplate =
heap->isolate()->context()->native_context()->
- arguments_boilerplate();
+ sloppy_arguments_boilerplate();
JSFunction* arguments_function =
JSFunction::cast(arguments_boilerplate->map()->constructor());
@@ -5441,6 +5402,12 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check the context extension (if any) if it can have references.
if (context->has_extension() && !context->IsCatchContext()) {
+ // With harmony scoping, a JSFunction may have a global context.
+ // TODO(mvstanton): walk into the ScopeInfo.
+ if (FLAG_harmony_scoping && context->IsGlobalContext()) {
+ return false;
+ }
+
return JSObject::cast(context->extension())->ReferencesObject(obj);
}
}
@@ -5456,10 +5423,10 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
if (!object->map()->is_extensible()) return object;
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5472,7 +5439,8 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
}
// It's not possible to seal objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
@@ -5495,10 +5463,10 @@ Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(!object->map()->is_extensible());
- if (FLAG_harmony_observation && object->map()->is_observed()) {
+ if (object->map()->is_observed()) {
EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
isolate->factory()->the_hole_value());
}
@@ -5528,18 +5496,18 @@ static void FreezeDictionary(Dictionary* dictionary) {
Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
- // Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!object->HasNonStrictArgumentsElements());
+ // Freezing sloppy arguments should be handled elsewhere.
+ ASSERT(!object->HasSloppyArgumentsElements());
ASSERT(!object->map()->is_observed());
if (object->map()->is_frozen()) return object;
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5552,7 +5520,8 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
}
// It's not possible to freeze objects with external array elements
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
@@ -5588,11 +5557,11 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
Handle<Map> old_map(object->map());
old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
if (result.IsTransition()) {
- Map* transition_map = result.GetTransitionTarget();
+ Handle<Map> transition_map(result.GetTransitionTarget());
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- object->set_map(transition_map);
+ JSObject::MigrateToMap(object, transition_map);
} else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
int num_descriptors = old_map->NumberOfOwnDescriptors();
@@ -5605,7 +5574,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
} else {
// Slow path: need to normalize properties for safety
NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
@@ -5616,7 +5585,7 @@ Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
// Freeze dictionary-mode properties
FreezeDictionary(object->property_dictionary());
@@ -5660,7 +5629,7 @@ void JSObject::SetObserved(Handle<JSObject> object) {
new_map = Map::Copy(handle(object->map()));
new_map->set_is_observed();
}
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
}
@@ -5781,7 +5750,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
ASSERT(names->get(i)->IsString());
Handle<String> key_string(String::cast(names->get(i)));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(*key_string);
+ JSReceiver::GetLocalPropertyAttribute(copy, key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
@@ -5796,7 +5765,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (copying) {
// Creating object copy for literals. No strict mode needed.
CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
- copy, key_string, result, NONE, kNonStrictMode));
+ copy, key_string, result, NONE, SLOPPY));
}
}
}
@@ -5855,7 +5824,7 @@ Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
@@ -5913,9 +5882,9 @@ bool JSReceiver::IsSimpleEnum() {
JSObject* curr = JSObject::cast(o);
int enum_length = curr->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) return false;
+ if (curr->IsAccessCheckNeeded()) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
- ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
if (curr != this && enum_length != 0) return false;
}
@@ -6116,7 +6085,7 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
return;
}
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
// Ascertain whether we have read-only properties or an existing
// getter/setter pair in an arguments elements dictionary backing
// store.
@@ -6199,9 +6168,10 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
}
-bool JSObject::CanSetCallback(Name* name) {
- ASSERT(!IsAccessCheckNeeded() ||
- GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
+bool JSObject::CanSetCallback(Handle<JSObject> object, Handle<Name> name) {
+ Isolate* isolate = object->GetIsolate();
+ ASSERT(!object->IsAccessCheckNeeded() ||
+ isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or its prototype chain.
@@ -6209,15 +6179,15 @@ bool JSObject::CanSetCallback(Name* name) {
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
// cause security problems.
- LookupResult callback_result(GetIsolate());
- LookupCallbackProperty(name, &callback_result);
+ LookupResult callback_result(isolate);
+ object->LookupCallbackProperty(*name, &callback_result);
if (callback_result.IsFound()) {
- Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- return !AccessorInfo::cast(obj)->prohibits_overwriting();
+ Object* callback_obj = callback_result.GetCallbackObject();
+ if (callback_obj->IsAccessorInfo()) {
+ return !AccessorInfo::cast(callback_obj)->prohibits_overwriting();
}
- if (obj->IsAccessorPair()) {
- return !AccessorPair::cast(obj)->prohibits_overwriting();
+ if (callback_obj->IsAccessorPair()) {
+ return !AccessorPair::cast(callback_obj)->prohibits_overwriting();
}
}
return true;
@@ -6267,7 +6237,7 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
// Update the dictionary backing store on the object.
- if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->sloppy_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
@@ -6324,8 +6294,8 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
return;
}
@@ -6349,21 +6319,20 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
- if (!object->CanSetCallback(*name)) return;
+ if (!JSObject::CanSetCallback(object, name)) return;
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation &&
- object->map()->is_observed() &&
+ bool is_observed = object->map()->is_observed() &&
*name != isolate->heap()->hidden_string();
bool preexists = false;
if (is_observed) {
if (is_element) {
preexists = HasLocalElement(object, index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(isolate, object, index);
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
}
} else {
LookupResult lookup(isolate);
@@ -6371,6 +6340,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
preexists = lookup.IsProperty();
if (preexists && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
+ CHECK_NOT_EMPTY_HANDLE(isolate, old_value);
}
}
}
@@ -6390,11 +6360,11 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
-static bool TryAccessorTransition(JSObject* self,
- Map* transitioned_map,
+static bool TryAccessorTransition(Handle<JSObject> self,
+ Handle<Map> transitioned_map,
int target_descriptor,
AccessorComponent component,
- Object* accessor,
+ Handle<Object> accessor,
PropertyAttributes attributes) {
DescriptorArray* descs = transitioned_map->instance_descriptors();
PropertyDetails details = descs->GetDetails(target_descriptor);
@@ -6408,8 +6378,8 @@ static bool TryAccessorTransition(JSObject* self,
PropertyAttributes target_attributes = details.attributes();
// Reuse transition if adding same accessor with same attributes.
- if (target_accessor == accessor && target_attributes == attributes) {
- self->set_map(transitioned_map);
+ if (target_accessor == *accessor && target_attributes == attributes) {
+ JSObject::MigrateToMap(self, transitioned_map);
return true;
}
@@ -6471,14 +6441,14 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
object->map()->LookupTransition(*object, *name, &result);
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
ASSERT(target->NumberOfOwnDescriptors() ==
object->map()->NumberOfOwnDescriptors());
// This works since descriptors are sorted in order of addition.
ASSERT(object->map()->instance_descriptors()->
GetKey(descriptor_number) == *name);
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
} else {
// If not, lookup a transition.
@@ -6486,12 +6456,12 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
// If there is a transition, try to follow it.
if (result.IsFound()) {
- Map* target = result.GetTransitionTarget();
+ Handle<Map> target(result.GetTransitionTarget());
int descriptor_number = target->LastAdded();
ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
->Equals(*name));
- return TryAccessorTransition(*object, target, descriptor_number,
- component, *accessor, attributes);
+ return TryAccessorTransition(object, target, descriptor_number,
+ component, accessor, attributes);
}
}
@@ -6504,7 +6474,7 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
accessors->set(component, *accessor);
Handle<Map> new_map = CopyInsertDescriptor(Handle<Map>(object->map()),
name, accessors, attributes);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
return true;
}
@@ -6517,8 +6487,8 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
}
@@ -6537,7 +6507,9 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Try to flatten before operating on the string.
if (name->IsString()) FlattenString(Handle<String>::cast(name));
- if (!object->CanSetCallback(*name)) return factory->undefined_value();
+ if (!JSObject::CanSetCallback(object, name)) {
+ return factory->undefined_value();
+ }
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
@@ -6567,7 +6539,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
case DICTIONARY_ELEMENTS:
break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
}
@@ -6601,8 +6573,8 @@ Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ !isolate->MayNamedAccessWrapper(object, name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -6656,8 +6628,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
- if (FLAG_track_double_fields &&
- descs->GetDetails(i).representation().IsDouble()) {
+ if (descs->GetDetails(i).representation().IsDouble()) {
ASSERT(property->IsHeapNumber());
if (value->IsNumber() && property->Number() == value->Number()) {
return descs->GetKey(i);
@@ -6812,6 +6783,8 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
Map* map;
// Replace descriptors by new_descriptors in all maps that share it.
+
+ GetHeap()->incremental_marking()->RecordWrites(descriptors);
for (Object* current = GetBackPointer();
!current->IsUndefined();
current = map->GetBackPointer()) {
@@ -7509,9 +7482,11 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- flags = Code::RemoveTypeFromFlags(flags);
- Object* result = LookupDefaultCache(name, flags);
- if (result->IsCode()) return result;
+ Object* result = LookupDefaultCache(name, Code::RemoveTypeFromFlags(flags));
+ if (result->IsCode()) {
+ if (Code::cast(result)->flags() == flags) return result;
+ return GetHeap()->undefined_value();
+ }
return LookupNormalTypeCache(name, flags);
}
@@ -7859,7 +7834,8 @@ MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
void FixedArray::Shrink(int new_length) {
ASSERT(0 <= new_length && new_length <= length());
if (new_length < length()) {
- RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), this, length() - new_length);
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(
+ GetHeap(), this, length() - new_length);
}
}
@@ -8224,7 +8200,7 @@ static bool IsIdentifier(UnicodeCache* cache, Name* name) {
// Checks whether the buffer contains an identifier (no escape).
if (!name->IsString()) return false;
String* string = String::cast(name);
- if (string->length() == 0) return false;
+ if (string->length() == 0) return true;
ConsStringIteratorOp op;
StringCharacterStream stream(string, &op);
if (!cache->IsIdentifierStart(stream.GetNext())) {
@@ -8240,9 +8216,7 @@ static bool IsIdentifier(UnicodeCache* cache, Name* name) {
bool Name::IsCacheable(Isolate* isolate) {
- return IsSymbol() ||
- IsIdentifier(isolate->unicode_cache(), this) ||
- this == isolate->heap()->hidden_string();
+ return IsSymbol() || IsIdentifier(isolate->unicode_cache(), this);
}
@@ -9199,10 +9173,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta);
}
- if (Marking::IsBlack(Marking::MarkBitFrom(start_of_string))) {
- MemoryChunk::IncrementLiveBytesFromMutator(start_of_string, -delta);
- }
-
+ heap->AdjustLiveBytes(start_of_string, -delta, Heap::FROM_MUTATOR);
if (new_length == 0) return heap->isolate()->factory()->empty_string();
return string;
@@ -9308,11 +9279,12 @@ static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- RightTrimFixedArray<FROM_GC>(heap, descriptors->GetEnumCache(), to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(
+ heap, descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- RightTrimFixedArray<FROM_GC>(heap, enum_indices_cache, to_trim);
+ RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim);
}
@@ -9324,7 +9296,7 @@ static void TrimDescriptorArray(Heap* heap,
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim == 0) return;
- RightTrimFixedArray<FROM_GC>(
+ RightTrimFixedArray<Heap::FROM_GC>(
heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
@@ -9398,7 +9370,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
int trim = t->number_of_transitions() - transition_index;
if (trim > 0) {
- RightTrimFixedArray<FROM_GC>(heap, t, t->IsSimpleTransition()
+ RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition()
? trim : trim * TransitionArray::kTransitionSize);
}
}
@@ -9448,13 +9420,13 @@ bool Map::EquivalentToForNormalization(Map* other,
void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- if (count_of_ptr_entries() > 0) {
- int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
- int last_ptr_offset =
- OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries() - 1);
- v->VisitPointers(
- HeapObject::RawField(this, first_ptr_offset),
- HeapObject::RawField(this, last_ptr_offset));
+ for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+ int index = first_code_ptr_index() + i;
+ v->VisitCodeEntry(reinterpret_cast<Address>(RawFieldOfElementAt(index)));
+ }
+ for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+ int index = first_heap_ptr_index() + i;
+ v->VisitPointer(RawFieldOfElementAt(index));
}
}
@@ -9622,38 +9594,42 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
if (optimized_code_map()->IsSmi()) return;
- int i;
- bool removed_entry = false;
FixedArray* code_map = FixedArray::cast(optimized_code_map());
- for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
- ASSERT(code_map->get(i)->IsNativeContext());
- if (Code::cast(code_map->get(i + 1)) == optimized_code) {
+ int dst = kEntriesStart;
+ int length = code_map->length();
+ for (int src = kEntriesStart; src < length; src += kEntryLength) {
+ ASSERT(code_map->get(src)->IsNativeContext());
+ if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
+ // Evict the src entry by not copying it to the dst entry.
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
- PrintF("]\n");
+ BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
+ if (osr.IsNone()) {
+ PrintF("]\n");
+ } else {
+ PrintF(" (osr ast id %d)]\n", osr.ToInt());
+ }
}
- removed_entry = true;
- break;
+ } else {
+ // Keep the src entry by copying it to the dst entry.
+ if (dst != src) {
+ code_map->set(dst + kContextOffset,
+ code_map->get(src + kContextOffset));
+ code_map->set(dst + kCachedCodeOffset,
+ code_map->get(src + kCachedCodeOffset));
+ code_map->set(dst + kLiteralsOffset,
+ code_map->get(src + kLiteralsOffset));
+ code_map->set(dst + kOsrAstIdOffset,
+ code_map->get(src + kOsrAstIdOffset));
+ }
+ dst += kEntryLength;
}
}
- while (i < (code_map->length() - kEntryLength)) {
- code_map->set(i + kContextOffset,
- code_map->get(i + kContextOffset + kEntryLength));
- code_map->set(i + kCachedCodeOffset,
- code_map->get(i + kCachedCodeOffset + kEntryLength));
- code_map->set(i + kLiteralsOffset,
- code_map->get(i + kLiteralsOffset + kEntryLength));
- code_map->set(i + kOsrAstIdOffset,
- code_map->get(i + kOsrAstIdOffset + kEntryLength));
- i += kEntryLength;
- }
- if (removed_entry) {
+ if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
- if (code_map->length() == kEntriesStart) {
- ClearOptimizedCodeMap();
- }
+ RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst);
+ if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
}
}
@@ -9663,7 +9639,7 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
ASSERT(shrink_by % kEntryLength == 0);
ASSERT(shrink_by <= code_map->length() - kEntriesStart);
// Always trim even when array is cleared because of heap verifier.
- RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+ RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by);
if (code_map->length() == kEntriesStart) {
ClearOptimizedCodeMap();
}
@@ -9781,7 +9757,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
// different prototype.
Handle<Map> new_map = Map::Copy(handle(function->map()));
- function->set_map(*new_map);
+ JSObject::MigrateToMap(function, new_map);
new_map->set_constructor(*value);
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
@@ -9798,15 +9774,15 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
void JSFunction::RemovePrototype() {
Context* native_context = context()->native_context();
- Map* no_prototype_map = shared()->is_classic_mode()
- ? native_context->function_without_prototype_map()
- : native_context->strict_mode_function_without_prototype_map();
+ Map* no_prototype_map = shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_without_prototype_map()
+ : native_context->strict_function_without_prototype_map();
if (map() == no_prototype_map) return;
- ASSERT(map() == (shared()->is_classic_mode()
- ? native_context->function_map()
- : native_context->strict_mode_function_map()));
+ ASSERT(map() == (shared()->strict_mode() == SLOPPY
+ ? native_context->sloppy_function_map()
+ : native_context->strict_function_map()));
set_map(no_prototype_map);
set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
@@ -10491,21 +10467,20 @@ Map* Code::FindFirstMap() {
}
-void Code::ReplaceNthObject(int n,
- Map* match_map,
- Object* replace_with) {
+void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
ASSERT(is_inline_cache_stub() || is_handler());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32);
+ int current_pattern = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
if (object->IsHeapObject()) {
- if (HeapObject::cast(object)->map() == match_map) {
- if (--n == 0) {
- info->set_target_object(replace_with);
- return;
- }
+ Map* map = HeapObject::cast(object)->map();
+ if (map == *pattern.find_[current_pattern]) {
+ info->set_target_object(*pattern.replace_[current_pattern]);
+ if (++current_pattern == pattern.count_) return;
}
}
}
@@ -10540,11 +10515,6 @@ void Code::FindAllTypes(TypeHandleList* types) {
}
-void Code::ReplaceFirstMap(Map* replace_with) {
- ReplaceNthObject(1, GetHeap()->meta_map(), replace_with);
-}
-
-
Code* Code::FindFirstHandler() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
@@ -10590,21 +10560,6 @@ Name* Code::FindFirstName() {
}
-void Code::ReplaceNthCell(int n, Cell* replace_with) {
- ASSERT(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (--n == 0) {
- info->set_target_cell(replace_with);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
void Code::ClearInlineCaches() {
ClearInlineCaches(NULL);
}
@@ -10624,25 +10579,26 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
if (kind == NULL || *kind == target->kind()) {
- IC::Clear(this->GetIsolate(), info->pc());
+ IC::Clear(this->GetIsolate(), info->pc(),
+ info->host()->constant_pool());
}
}
}
}
-void Code::ClearTypeFeedbackCells(Heap* heap) {
+void Code::ClearTypeFeedbackInfo(Heap* heap) {
if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- Cell* cell = type_feedback_cells->GetCell(i);
- // Don't clear AllocationSites
- Object* value = cell->value();
- if (value == NULL || !value->IsAllocationSite()) {
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+ FixedArray* feedback_vector =
+ TypeFeedbackInfo::cast(raw_info)->feedback_vector();
+ for (int i = 0; i < feedback_vector->length(); i++) {
+ Object* obj = feedback_vector->get(i);
+ if (!obj->IsAllocationSite()) {
+ // TODO(mvstanton): Can't I avoid a write barrier for this sentinel?
+ feedback_vector->set(i,
+ TypeFeedbackInfo::RawUninitializedSentinel(heap));
}
}
}
@@ -11068,9 +11024,7 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
switch (kind) {
case STORE_IC:
case KEYED_STORE_IC:
- if (extra == kStrictMode) {
- name = "STRICT";
- }
+ if (extra == STRICT) name = "STRICT";
break;
default:
break;
@@ -11091,8 +11045,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
- PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
- extended_extra_ic_state() : extra_ic_state());
+ PrintExtraICState(out, kind(), extra_ic_state());
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", StubType2String(type()));
}
@@ -11192,33 +11145,20 @@ Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetFastElementsCapacityAndLength(capacity, length, smi_mode),
- FixedArray);
-}
-
-
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
// Allocate a new fast elements backing store.
- FixedArray* new_elements;
- MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe->To(&new_elements)) return maybe;
+ Handle<FixedArray> new_elements =
+ object->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
ElementsKind new_elements_kind;
// The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
// or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_elements =
(smi_mode == kForceSmiElements) ||
- ((smi_mode == kAllowSmiElements) && HasFastSmiElements());
+ ((smi_mode == kAllowSmiElements) && object->HasFastSmiElements());
if (has_fast_smi_elements) {
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
@@ -11232,83 +11172,47 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
new_elements_kind = FAST_ELEMENTS;
}
}
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
- MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ accessor->CopyElements(object, new_elements, elements_kind);
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- Map* new_map = map();
- if (new_elements_kind != elements_kind) {
- MaybeObject* maybe =
- GetElementsTransitionMap(GetIsolate(), new_elements_kind);
- if (!maybe->To(&new_map)) return maybe;
- }
- ValidateElements();
- set_map_and_elements(new_map, new_elements);
+ if (elements_kind != SLOPPY_ARGUMENTS_ELEMENTS) {
+ Handle<Map> new_map = (new_elements_kind != elements_kind)
+ ? GetElementsTransitionMap(object, new_elements_kind)
+ : handle(object->map());
+ object->ValidateElements();
+ object->set_map_and_elements(*new_map, *new_elements);
// Transition through the allocation site as well if present.
- maybe_obj = UpdateAllocationSite(new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
+ JSObject::UpdateAllocationSite(object, new_elements_kind);
} else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, new_elements);
+ Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(old_elements);
+ parameter_map->set(1, *new_elements);
}
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), new_elements);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), new_elements);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
return new_elements;
}
-bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
- if (kind != Code::OPTIMIZED_FUNCTION) return false;
-
- if (object->IsMap()) {
- return Map::cast(object)->CanTransition() &&
- FLAG_collect_maps &&
- FLAG_weak_embedded_maps_in_optimized_code;
- }
-
- if (object->IsJSObject() ||
- (object->IsCell() && Cell::cast(object)->value()->IsJSObject())) {
- return FLAG_weak_embedded_objects_in_optimized_code;
- }
-
- return false;
-}
-
-
void JSObject::SetFastDoubleElementsCapacityAndLength(Handle<JSObject> object,
int capacity,
int length) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->SetFastDoubleElementsCapacityAndLength(capacity, length));
-}
-
-
-MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
- int capacity,
- int length) {
- Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
+ ASSERT(!object->HasExternalArrayElements());
- FixedArrayBase* elems;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&elems)) return maybe_obj;
- }
+ Handle<FixedArrayBase> elems =
+ object->GetIsolate()->factory()->NewFixedDoubleArray(capacity);
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
+ CHECK(elements_kind != SLOPPY_ARGUMENTS_ELEMENTS);
ElementsKind new_elements_kind = elements_kind;
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
@@ -11316,49 +11220,37 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
new_elements_kind = FAST_DOUBLE_ELEMENTS;
}
- Map* new_map;
- { MaybeObject* maybe_obj =
- GetElementsTransitionMap(heap->isolate(), new_elements_kind);
- if (!maybe_obj->To(&new_map)) return maybe_obj;
- }
+ Handle<Map> new_map = GetElementsTransitionMap(object, new_elements_kind);
- FixedArrayBase* old_elements = elements();
+ Handle<FixedArrayBase> old_elements(object->elements());
ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
- if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
- ValidateElements();
- set_map_and_elements(new_map, elems);
- } else {
- FixedArray* parameter_map = FixedArray::cast(old_elements);
- parameter_map->set(1, elems);
- }
+ accessor->CopyElements(object, elems, elements_kind);
+
+ object->ValidateElements();
+ object->set_map_and_elements(*new_map, *elems);
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- GetElementsKind(), elems);
+ PrintElementsTransition(stdout, object, elements_kind, old_elements,
+ object->GetElementsKind(), elems);
}
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(length));
+ if (object->IsJSArray()) {
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(length));
}
-
- return this;
}
-MaybeObject* JSArray::Initialize(int capacity, int length) {
+// static
+void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
ASSERT(capacity >= 0);
- return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ array->GetIsolate()->factory()->NewJSArrayStorage(
+ array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
-void JSArray::Expand(int required_size) {
- GetIsolate()->factory()->SetElementsCapacityAndLength(
- Handle<JSArray>(this), required_size, required_size);
+void JSArray::Expand(Handle<JSArray> array, int required_size) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ accessor->SetCapacityAndLength(array, required_size, required_size);
}
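// The conversions above follow one mechanical pattern repeated throughout
// this file: methods that returned MaybeObject* and unpacked allocations by
// hand become static methods that take Handle<> arguments and allocate
// through the isolate's factory. A minimal sketch of the two conventions
// (Grow is an illustrative name, not a function in this diff):
//
//   // Before: raw pointers; every allocation failure is propagated by hand.
//   MaybeObject* JSArray::Grow(int capacity) {
//     FixedArray* elms;
//     MaybeObject* maybe = GetHeap()->AllocateFixedArray(capacity);
//     if (!maybe->To(&elms)) return maybe;
//     set_elements(elms);
//     return this;
//   }
//
//   // After: handles keep the objects rooted across GC; the factory retries
//   // GC or aborts on allocation failure, so callers no longer check for it.
//   // static
//   void JSArray::Grow(Handle<JSArray> array, int capacity) {
//     Handle<FixedArray> elms =
//         array->GetIsolate()->factory()->NewFixedArray(capacity);
//     array->set_elements(*elms);
//   }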
@@ -11370,12 +11262,17 @@ static bool GetOldValue(Isolate* isolate,
uint32_t index,
List<Handle<Object> >* old_values,
List<uint32_t>* indices) {
- PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes attributes =
+ JSReceiver::GetLocalElementAttribute(object, index);
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
- old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(isolate, object, index)
- : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+ Handle<Object> value;
+ if (object->GetLocalElementAccessorPair(index) != NULL) {
+ value = Handle<Object>::cast(isolate->factory()->the_hole_value());
+ } else {
+ value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
+ old_values->Add(value);
indices->Add(index);
return true;
}
@@ -11430,67 +11327,67 @@ static void EndPerformSplice(Handle<JSArray> object) {
}
-MaybeObject* JSArray::SetElementsLength(Object* len) {
+Handle<Object> JSArray::SetElementsLength(Handle<JSArray> array,
+ Handle<Object> new_length_handle) {
// We should never end in here with a pixel or external array.
- ASSERT(AllowsSetElementsLength());
- if (!(FLAG_harmony_observation && map()->is_observed()))
- return GetElementsAccessor()->SetLength(this, len);
+ ASSERT(array->AllowsSetElementsLength());
+ if (!array->map()->is_observed()) {
+ return array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ }
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSArray> self(this);
+ Isolate* isolate = array->GetIsolate();
List<uint32_t> indices;
List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(self->length(), isolate);
- Handle<Object> new_length_handle(len, isolate);
+ Handle<Object> old_length_handle(array->length(), isolate);
uint32_t old_length = 0;
CHECK(old_length_handle->ToArrayIndex(&old_length));
uint32_t new_length = 0;
- if (!new_length_handle->ToArrayIndex(&new_length))
- return Failure::InternalError();
+ CHECK(new_length_handle->ToArrayIndex(&new_length));
static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+ int num_elements = array->NumberOfLocalElements(kNoAttrFilter);
if (num_elements > 0) {
if (old_length == static_cast<uint32_t>(num_elements)) {
// Simple case for arrays without holes.
for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, i, &old_values, &indices)) break;
}
} else {
// For sparse arrays, only iterate over existing elements.
// TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
// the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- self->GetLocalElementKeys(*keys, kNoAttrFilter);
+ array->GetLocalElementKeys(*keys, kNoAttrFilter);
while (num_elements-- > 0) {
uint32_t index = NumberToUint32(keys->get(num_elements));
if (index < new_length) break;
- if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+ if (!GetOldValue(isolate, array, index, &old_values, &indices)) break;
}
}
}
- MaybeObject* result =
- self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ Handle<Object> hresult =
+ array->GetElementsAccessor()->SetLength(array, new_length_handle);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, hresult, hresult);
- CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length == new_length) return *hresult;
+ CHECK(array->length()->ToArrayIndex(&new_length));
+ if (old_length == new_length) return hresult;
- BeginPerformSplice(self);
+ BeginPerformSplice(array);
for (int i = 0; i < indices.length(); ++i) {
+ // For deletions where the property was an accessor, old_values[i]
+ // will be the hole, which instructs EnqueueChangeRecord to elide
+ // the "oldValue" property.
JSObject::EnqueueChangeRecord(
- self, "delete", isolate->factory()->Uint32ToString(indices[i]),
+ array, "delete", isolate->factory()->Uint32ToString(indices[i]),
old_values[i]);
}
JSObject::EnqueueChangeRecord(
- self, "update", isolate->factory()->length_string(),
+ array, "update", isolate->factory()->length_string(),
old_length_handle);
- EndPerformSplice(self);
+ EndPerformSplice(array);
uint32_t index = Min(old_length, new_length);
uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
@@ -11498,18 +11395,21 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
if (delete_count > 0) {
for (int i = indices.length() - 1; i >= 0; i--) {
+ // Skip deletions where the property was an accessor, leaving holes
+ // in the array of old values.
+ if (old_values[i]->IsTheHole()) continue;
JSObject::SetElement(deleted, indices[i] - index, old_values[i], NONE,
- kNonStrictMode);
+ SLOPPY);
}
SetProperty(deleted, isolate->factory()->length_string(),
isolate->factory()->NewNumberFromUint(delete_count),
- NONE, kNonStrictMode);
+ NONE, SLOPPY);
}
- EnqueueSpliceRecord(self, index, deleted, add_count);
+ EnqueueSpliceRecord(array, index, deleted, add_count);
- return *hresult;
+ return hresult;
}
@@ -11764,23 +11664,14 @@ bool DependentCode::MarkCodeForDeoptimization(
// Mark all the code that needs to be deoptimized.
bool marked = false;
for (int i = start; i < end; i++) {
- Object* object = object_at(i);
- // TODO(hpayer): This is a temporary hack. Foreign objects move after
- // new space evacuation. Since pretenuring may mark these objects as aborted
- // we have to follow the forwarding pointer in that case.
- MapWord map_word = HeapObject::cast(object)->map_word();
- if (map_word.IsForwardingAddress()) {
- object = map_word.ToForwardingAddress();
- }
- if (object->IsCode()) {
- Code* code = Code::cast(object);
+ if (is_code_at(i)) {
+ Code* code = code_at(i);
if (!code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
marked = true;
}
} else {
- CompilationInfo* info = reinterpret_cast<CompilationInfo*>(
- Foreign::cast(object)->foreign_address());
+ CompilationInfo* info = compilation_info_at(i);
info->AbortDueToDependencyChange();
}
}
@@ -11886,7 +11777,7 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
new_map->set_prototype(*value);
}
ASSERT(new_map->prototype() == *value);
- real_receiver->set_map(*new_map);
+ JSObject::MigrateToMap(real_receiver, new_map);
if (!dictionary_elements_in_chain &&
new_map->DictionaryElementsInPrototypeChainOnly()) {
@@ -11902,16 +11793,16 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
}
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
- uint32_t first_arg,
- uint32_t arg_count,
- EnsureElementsMode mode) {
+void JSObject::EnsureCanContainElements(Handle<JSObject> object,
+ Arguments* args,
+ uint32_t first_arg,
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
- args->arguments() - first_arg - (arg_count - 1),
- arg_count, mode);
+ object, args->arguments() - first_arg - (arg_count - 1), arg_count, mode);
}
@@ -11952,7 +11843,7 @@ Handle<Object> JSObject::SetElementWithInterceptor(
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
@@ -12040,7 +11931,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
@@ -12079,9 +11970,7 @@ Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
return SetPropertyWithDefinedSetter(
object, Handle<JSReceiver>::cast(setter), value);
} else {
- if (strict_mode == kNonStrictMode) {
- return value;
- }
+ if (strict_mode == SLOPPY) return value;
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder };
Handle<Object> error = isolate->factory()->NewTypeError(
@@ -12103,7 +11992,7 @@ bool JSObject::HasFastArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12115,7 +12004,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
Heap* heap = GetHeap();
if (!elements()->IsFixedArray()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ if (elements->map() != heap->sloppy_arguments_elements_map()) {
return false;
}
FixedArray* arguments = FixedArray::cast(elements->get(1));
@@ -12129,7 +12018,7 @@ bool JSObject::HasDictionaryArgumentsElements() {
Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype) {
ASSERT(object->HasFastSmiOrObjectElements() ||
object->HasFastArgumentsElements());
@@ -12147,7 +12036,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
if (backing_store->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
+ isolate->heap()->sloppy_arguments_elements_map()) {
backing_store = handle(FixedArray::cast(backing_store->get(1)));
} else {
backing_store = EnsureWritableFastElements(object);
@@ -12227,7 +12116,7 @@ Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
UpdateAllocationSite(object, kind);
Handle<Map> new_map = GetElementsTransitionMap(object, kind);
- object->set_map(*new_map);
+ JSObject::MigrateToMap(object, new_map);
ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
}
// Increase backing store capacity if that's been decided previously.
@@ -12258,7 +12147,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
@@ -12268,7 +12157,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// Insert element in the dictionary.
Handle<FixedArray> elements(FixedArray::cast(object->elements()));
bool is_arguments =
- (elements->map() == isolate->heap()->non_strict_arguments_elements_map());
+ (elements->map() == isolate->heap()->sloppy_arguments_elements_map());
Handle<SeededNumberDictionary> dictionary(is_arguments
? SeededNumberDictionary::cast(elements->get(1))
: SeededNumberDictionary::cast(*elements));
@@ -12290,7 +12179,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12328,7 +12217,7 @@ Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
if (!object->map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
+ if (strict_mode == SLOPPY) {
return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12401,7 +12290,7 @@ Handle<Object> JSObject::SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype) {
ASSERT(object->HasFastDoubleElements());
@@ -12486,7 +12375,8 @@ Handle<Object> JSObject::SetFastDoubleElement(
// Otherwise default to slow case.
ASSERT(object->HasFastDoubleElements());
ASSERT(object->map()->has_fast_double_elements());
- ASSERT(object->elements()->IsFixedDoubleArray());
+ ASSERT(object->elements()->IsFixedDoubleArray() ||
+ object->elements()->length() == 0);
NormalizeElements(object);
ASSERT(object->HasDictionaryElements());
@@ -12498,7 +12388,7 @@ Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
if (object->IsJSProxy()) {
return JSProxy::SetElementWithHandler(
Handle<JSProxy>::cast(object), object, index, value, strict_mode);
@@ -12511,7 +12401,7 @@ Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ASSERT(!object->HasExternalArrayElements());
return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
}
@@ -12521,12 +12411,13 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = object->GetIsolate();
- if (object->HasExternalArrayElements()) {
+ if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number =
@@ -12538,8 +12429,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_SET);
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -12556,7 +12447,9 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
}
// Don't allow element properties to be redefined for external arrays.
- if (object->HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ if ((object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) &&
+ set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[] = { object, number };
Handle<Object> error = isolate->factory()->NewTypeError(
@@ -12572,7 +12465,7 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && object->map()->is_observed())) {
+ if (!object->map()->is_observed()) {
return object->HasIndexedInterceptor()
? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
check_prototype,
@@ -12583,14 +12476,16 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
set_mode);
}
- PropertyAttributes old_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes old_attributes =
+ JSReceiver::GetLocalElementAttribute(object, index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
- if (object->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(isolate, object, index);
+ if (object->GetLocalElementAccessorPair(index) == NULL) {
+ old_value = Object::GetElementNoExceptionThrown(isolate, object, index);
+ }
} else if (object->IsJSArray()) {
// Store old array length in case adding an element grows the array.
old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
@@ -12609,7 +12504,7 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = object->GetLocalElementAttribute(index);
+ PropertyAttributes new_attributes = GetLocalElementAttribute(object, index);
if (old_attributes == ABSENT) {
if (object->IsJSArray() &&
!old_length_handle->SameValue(
@@ -12635,7 +12530,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(isolate, object, index);
+ Handle<Object> new_value =
+ Object::GetElementNoExceptionThrown(isolate, object, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
@@ -12654,7 +12550,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
ASSERT(object->HasDictionaryElements() ||
@@ -12702,7 +12598,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
return SetDictionaryElement(object, index, value, attributes, strict_mode,
check_prototype,
set_mode);
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
Handle<Object> probe = index < length - 2 ?
@@ -12741,14 +12637,7 @@ Handle<Object> JSObject::SetElementWithoutInterceptor(
}
-void JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->TransitionElementsKind(to_kind));
-}
-
-
-const double AllocationSite::kPretenureRatio = 0.60;
+const double AllocationSite::kPretenureRatio = 0.85;
void AllocationSite::ResetPretenureDecision() {
@@ -12779,11 +12668,13 @@ bool AllocationSite::IsNestedSite() {
}
-MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
- Isolate* isolate = GetIsolate();
+void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind) {
+ Isolate* isolate = site->GetIsolate();
- if (SitePointsToLiteral() && transition_info()->IsJSArray()) {
- JSArray* transition_info = JSArray::cast(this->transition_info());
+ if (site->SitePointsToLiteral() && site->transition_info()->IsJSArray()) {
+ Handle<JSArray> transition_info =
+ handle(JSArray::cast(site->transition_info()));
ElementsKind kind = transition_info->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
@@ -12796,22 +12687,21 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
CHECK(transition_info->length()->ToArrayIndex(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
- bool is_nested = IsNestedSite();
+ bool is_nested = site->IsNestedSite();
PrintF(
"AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
is_nested ? "(nested)" : "",
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- MaybeObject* result = transition_info->TransitionElementsKind(to_kind);
- if (result->IsFailure()) return result;
- dependent_code()->DeoptimizeDependentCodeGroup(
+ JSObject::TransitionElementsKind(transition_info, to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
} else {
- ElementsKind kind = GetElementsKind();
+ ElementsKind kind = site->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -12819,16 +12709,15 @@ MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
- reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(*site),
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- SetElementsKind(to_kind);
- dependent_code()->DeoptimizeDependentCodeGroup(
+ site->SetElementsKind(to_kind);
+ site->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
- return this;
}
@@ -12847,64 +12736,62 @@ void AllocationSite::AddDependentCompilationInfo(Handle<AllocationSite> site,
void JSObject::UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->UpdateAllocationSite(to_kind));
-}
-
+ if (!object->IsJSArray()) return;
-MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
- if (!IsJSArray()) return this;
+ Heap* heap = object->GetHeap();
+ if (!heap->InNewSpace(*object)) return;
- Heap* heap = GetHeap();
- if (!heap->InNewSpace(this)) return this;
-
- // Check if there is potentially a memento behind the object. If
-  // the last word of the memento is on another page we return
-  // immediately.
- Address object_address = address();
- Address memento_address = object_address + JSArray::kSize;
- Address last_memento_word_address = memento_address + kPointerSize;
- if (!NewSpacePage::OnSamePage(object_address,
- last_memento_word_address)) {
- return this;
- }
+ Handle<AllocationSite> site;
+ {
+ DisallowHeapAllocation no_allocation;
+ // Check if there is potentially a memento behind the object. If
+    // the last word of the memento is on another page we return
+    // immediately.
+ Address object_address = object->address();
+ Address memento_address = object_address + JSArray::kSize;
+ Address last_memento_word_address = memento_address + kPointerSize;
+ if (!NewSpacePage::OnSamePage(object_address,
+ last_memento_word_address)) {
+ return;
+ }
- // Either object is the last object in the new space, or there is another
-  // object of at least word size (the header map word) following it, so it
-  // suffices to compare ptr and top here.
- Address top = heap->NewSpaceTop();
- ASSERT(memento_address == top ||
- memento_address + HeapObject::kHeaderSize <= top);
- if (memento_address == top) return this;
+ // Either object is the last object in the new space, or there is another
+    // object of at least word size (the header map word) following it, so it
+    // suffices to compare ptr and top here.
+ Address top = heap->NewSpaceTop();
+ ASSERT(memento_address == top ||
+ memento_address + HeapObject::kHeaderSize <= top);
+ if (memento_address == top) return;
- HeapObject* candidate = HeapObject::FromAddress(memento_address);
- if (candidate->map() != heap->allocation_memento_map()) return this;
+ HeapObject* candidate = HeapObject::FromAddress(memento_address);
+ if (candidate->map() != heap->allocation_memento_map()) return;
- AllocationMemento* memento = AllocationMemento::cast(candidate);
- if (!memento->IsValid()) return this;
+ AllocationMemento* memento = AllocationMemento::cast(candidate);
+ if (!memento->IsValid()) return;
- // Walk through to the Allocation Site
- AllocationSite* site = memento->GetAllocationSite();
- return site->DigestTransitionFeedback(to_kind);
+ // Walk through to the Allocation Site
+ site = handle(memento->GetAllocationSite());
+ }
+ AllocationSite::DigestTransitionFeedback(site, to_kind);
}
-MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ElementsKind from_kind = map()->elements_kind();
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ ElementsKind from_kind = object->map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (from_kind == to_kind) return this;
+ if (from_kind == to_kind) return;
// Don't update the site if to_kind isn't fast
if (IsFastElementsKind(to_kind)) {
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ UpdateAllocationSite(object, to_kind);
}
- Isolate* isolate = GetIsolate();
- if (elements() == isolate->heap()->empty_fixed_array() ||
+ Isolate* isolate = object->GetIsolate();
+ if (object->elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
IsFastSmiOrObjectElementsKind(to_kind)) ||
(from_kind == FAST_DOUBLE_ELEMENTS &&
@@ -12912,54 +12799,48 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
// No change is needed to the elements() buffer, the transition
// only requires a map change.
- MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- set_map(new_map);
+ Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
+ MigrateToMap(object, new_map);
if (FLAG_trace_elements_transitions) {
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
- PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+ Handle<FixedArrayBase> elms(object->elements());
+ PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
- return this;
+ return;
}
- FixedArrayBase* elms = FixedArrayBase::cast(elements());
+ Handle<FixedArrayBase> elms(object->elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
- if (IsJSArray()) {
- Object* raw_length = JSArray::cast(this)->length();
+ if (object->IsJSArray()) {
+ Object* raw_length = Handle<JSArray>::cast(object)->length();
if (raw_length->IsUndefined()) {
// If length is undefined, then JSArray is being initialized and has no
// elements, assume a length of zero.
length = 0;
} else {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ CHECK(raw_length->ToArrayIndex(&length));
}
}
if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastDoubleElementsCapacityAndLength(object, capacity, length);
+ object->ValidateElements();
+ return;
}
if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
- MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
- capacity, length, kDontAllowSmiElements);
- if (maybe_result->IsFailure()) return maybe_result;
- ValidateElements();
- return this;
+ SetFastElementsCapacityAndLength(object, capacity, length,
+ kDontAllowSmiElements);
+ object->ValidateElements();
+ return;
}
// This method should never be called for any other case than the ones
// handled above.
UNREACHABLE();
- return GetIsolate()->heap()->null_value();
}
@@ -13003,46 +12884,41 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
}
-MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
- uint32_t index) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::GetElementWithInterceptor(Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
- Handle<Object> this_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this, isolate);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor(), isolate);
if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
+ ApiIndexedPropertyAccess("interceptor-indexed-get", *object, index));
PropertyCallbackArguments
- args(isolate, interceptor->data(), receiver, this);
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Value> result = args.Call(getter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle before return.
+ return Handle<Object>(*result_internal, isolate);
}
}
- Heap* heap = holder_handle->GetHeap();
- ElementsAccessor* handler = holder_handle->GetElementsAccessor();
- MaybeObject* raw_result = handler->Get(*this_handle,
- *holder_handle,
- index);
- if (raw_result != heap->the_hole_value()) return raw_result;
-
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ ElementsAccessor* handler = object->GetElementsAccessor();
+ Handle<Object> result = handler->Get(receiver, object, index);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
+ if (!result->IsTheHole()) return result;
- Object* pt = holder_handle->GetPrototype();
- if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(isolate, *this_handle, index);
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return isolate->factory()->undefined_value();
+ return Object::GetElementWithReceiver(isolate, proto, receiver, index);
}
@@ -13061,7 +12937,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
backing_store_base =
FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
backing_store = FixedArray::cast(backing_store_base);
@@ -13103,8 +12979,9 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
// Fall through if packing is not guaranteed.
case FAST_HOLEY_DOUBLE_ELEMENTS: {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
- *capacity = elms->length();
+ *capacity = elements()->length();
+ if (*capacity == 0) break;
+      FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
for (int i = 0; i < *capacity; i++) {
if (!elms->is_the_hole(i)) ++(*used);
}
@@ -13128,6 +13005,21 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
+bool JSObject::WouldConvertToSlowElements(Handle<Object> key) {
+ uint32_t index;
+ if (HasFastElements() && key->ToArrayIndex(&index)) {
+ Handle<FixedArrayBase> backing_store(FixedArrayBase::cast(elements()));
+ uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+ if (index >= capacity) {
+ if ((index - capacity) >= kMaxGap) return true;
+ uint32_t new_capacity = NewElementsCapacity(index + 1);
+ return ShouldConvertToSlowElements(new_capacity);
+ }
+ }
+ return false;
+}
+
+
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
STATIC_ASSERT(kMaxUncheckedOldFastElementsLength <=
kMaxUncheckedFastElementsLength);
@@ -13157,11 +13049,11 @@ bool JSObject::ShouldConvertToFastElements() {
if (IsAccessCheckNeeded()) return false;
// Observed objects may not go to fast mode because they rely on map checks,
// and for fast element accesses we sometimes check element kinds only.
- if (FLAG_harmony_observation && map()->is_observed()) return false;
+ if (map()->is_observed()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (elements->map() == GetHeap()->sloppy_arguments_elements_map()) {
dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
dictionary = SeededNumberDictionary::cast(elements);
@@ -13187,6 +13079,7 @@ bool JSObject::ShouldConvertToFastElements() {
bool JSObject::ShouldConvertToFastDoubleElements(
bool* has_smi_only_elements) {
*has_smi_only_elements = false;
+ if (HasSloppyArgumentsElements()) return false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
SeededNumberDictionary* dictionary = element_dictionary();
@@ -13351,8 +13244,8 @@ bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13365,11 +13258,11 @@ bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13382,8 +13275,8 @@ bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- return object->GetElementAttributeWithoutInterceptor(
- *object, index, false) != ABSENT;
+ return GetElementAttributeWithoutInterceptor(
+ object, object, index, false) != ABSENT;
}
@@ -13393,8 +13286,8 @@ bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
SealHandleScope shs(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ if (!isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_HAS);
return false;
}
}
@@ -13641,7 +13534,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ case SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
int mapped_length = parameter_map->length() - 2;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -13739,11 +13632,11 @@ class StringSharedKey : public HashTableKey {
public:
StringSharedKey(String* source,
SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position)
: source_(source),
shared_(shared),
- language_mode_(language_mode),
+ strict_mode_(strict_mode),
scope_position_(scope_position) { }
bool IsMatch(Object* other) {
@@ -13751,12 +13644,10 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(other);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
if (shared != shared_) return false;
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
+ if (strict_mode != strict_mode_) return false;
int scope_position = Smi::cast(other_array->get(3))->value();
if (scope_position != scope_position_) return false;
String* source = String::cast(other_array->get(1));
@@ -13765,7 +13656,7 @@ class StringSharedKey : public HashTableKey {
static uint32_t StringSharedHashHelper(String* source,
SharedFunctionInfo* shared,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
uint32_t hash = source->Hash();
if (shared->HasSourceCode()) {
@@ -13776,8 +13667,7 @@ class StringSharedKey : public HashTableKey {
// collection.
Script* script = Script::cast(shared->script());
hash ^= String::cast(script->source())->Hash();
- if (language_mode == STRICT_MODE) hash ^= 0x8000;
- if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
+ if (strict_mode == STRICT) hash ^= 0x8000;
hash += scope_position;
}
return hash;
@@ -13785,21 +13675,19 @@ class StringSharedKey : public HashTableKey {
uint32_t Hash() {
return StringSharedHashHelper(
- source_, shared_, language_mode_, scope_position_);
+ source_, shared_, strict_mode_, scope_position_);
}
uint32_t HashForObject(Object* obj) {
FixedArray* other_array = FixedArray::cast(obj);
SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
String* source = String::cast(other_array->get(1));
- int language_unchecked = Smi::cast(other_array->get(2))->value();
- ASSERT(language_unchecked == CLASSIC_MODE ||
- language_unchecked == STRICT_MODE ||
- language_unchecked == EXTENDED_MODE);
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ int strict_unchecked = Smi::cast(other_array->get(2))->value();
+ ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
int scope_position = Smi::cast(other_array->get(3))->value();
return StringSharedHashHelper(
- source, shared, language_mode, scope_position);
+ source, shared, strict_mode, scope_position);
}
MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) {
@@ -13810,7 +13698,7 @@ class StringSharedKey : public HashTableKey {
FixedArray* other_array = FixedArray::cast(obj);
other_array->set(0, shared_);
other_array->set(1, source_);
- other_array->set(2, Smi::FromInt(language_mode_));
+ other_array->set(2, Smi::FromInt(strict_mode_));
other_array->set(3, Smi::FromInt(scope_position_));
return other_array;
}
@@ -13818,7 +13706,7 @@ class StringSharedKey : public HashTableKey {
private:
String* source_;
SharedFunctionInfo* shared_;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
int scope_position_;
};
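// Throughout this upgrade the three-valued LanguageMode (CLASSIC_MODE,
// STRICT_MODE, EXTENDED_MODE) used in keys like the one above is collapsed
// into a two-valued StrictMode. Roughly (a sketch; the actual enum is
// defined elsewhere in the source tree):
//
//   enum StrictMode { SLOPPY, STRICT };
//
// which is why the hash above now folds in a single strictness bit (0x8000)
// and the old extended-mode bit (0x0080) disappears.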
@@ -13991,7 +13879,7 @@ MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap,
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException(0x10);
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
Object* obj;
@@ -14454,8 +14342,11 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit) {
Isolate* isolate = object->GetIsolate();
+ if (object->HasSloppyArgumentsElements() ||
+ object->map()->is_observed()) {
+ return handle(Smi::FromInt(-1), isolate);
+ }
- ASSERT(!object->map()->is_observed());
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
@@ -14477,10 +14368,11 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
object->ValidateElements();
object->set_map_and_elements(*new_map, *fast_elements);
- } else if (object->HasExternalArrayElements()) {
- // External arrays cannot have holes or undefined elements.
+ } else if (object->HasExternalArrayElements() ||
+ object->HasFixedTypedArrayElements()) {
+ // Typed arrays cannot have holes or undefined elements.
return handle(Smi::FromInt(
- ExternalArray::cast(object->elements())->length()), isolate);
+ FixedArrayBase::cast(object->elements())->length()), isolate);
} else if (!object->HasFastDoubleElements()) {
EnsureWritableFastElements(object);
}
@@ -14581,12 +14473,14 @@ ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ARRAY_TYPE: \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
return kExternal##Type##Array;
TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
#undef INSTANCE_TYPE_TO_ARRAY_TYPE
default:
+ UNREACHABLE();
return static_cast<ExternalArrayType>(-1);
}
}
@@ -15011,22 +14905,11 @@ MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
}
-// The key for the script compilation cache is dependent on the mode flags,
-// because they change the global language mode and thus binding behaviour.
-// If flags change at some point, we must ensure that we do not hit the cache
-// for code compiled with different settings.
-static LanguageMode CurrentGlobalLanguageMode() {
- return FLAG_use_strict
- ? (FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE)
- : CLASSIC_MODE;
-}
-
-
Object* CompilationCacheTable::Lookup(String* src, Context* context) {
SharedFunctionInfo* shared = context->closure()->shared();
StringSharedKey key(src,
shared,
- CurrentGlobalLanguageMode(),
+ FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -15036,11 +14919,11 @@ Object* CompilationCacheTable::Lookup(String* src, Context* context) {
Object* CompilationCacheTable::LookupEval(String* src,
Context* context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
StringSharedKey key(src,
context->closure()->shared(),
- language_mode,
+ strict_mode,
scope_position);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -15063,7 +14946,7 @@ MaybeObject* CompilationCacheTable::Put(String* src,
SharedFunctionInfo* shared = context->closure()->shared();
StringSharedKey key(src,
shared,
- CurrentGlobalLanguageMode(),
+ FLAG_use_strict ? STRICT : SLOPPY,
RelocInfo::kNoPosition);
CompilationCacheTable* cache;
MaybeObject* maybe_cache = EnsureCapacity(1, &key);
@@ -15087,7 +14970,7 @@ MaybeObject* CompilationCacheTable::PutEval(String* src,
int scope_position) {
StringSharedKey key(src,
context->closure()->shared(),
- value->language_mode(),
+ value->strict_mode(),
scope_position);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
@@ -15516,8 +15399,7 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k) &&
- !FilterKey(k, filter)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15531,7 +15413,7 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
template<typename Shape, typename Key>
int Dictionary<Shape, Key>::NumberOfEnumElements() {
return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM));
+ static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
}
@@ -15540,12 +15422,12 @@ void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfEnumElements());
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -15559,45 +15441,38 @@ void Dictionary<Shape, Key>::CopyKeysTo(
}
-FixedArray* NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(NameDictionary* dict) : dict(dict) { }
+ bool operator() (Smi* a, Smi* b) {
+ PropertyDetails da(dict->DetailsAt(a->value()));
+ PropertyDetails db(dict->DetailsAt(b->value()));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ NameDictionary* dict;
+};
+
+
+void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
- ASSERT(length >= NumberOfEnumElements());
- Heap* heap = GetHeap();
- Object* undefined_value = heap->undefined_value();
int capacity = Capacity();
int properties = 0;
-
- // Fill in the enumeration array by assigning enumerable keys at their
- // enumeration index. This will leave holes in the array if there are keys
- // that are deleted or not enumerable.
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k) && !k->IsSymbol()) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted() || details.IsDontEnum()) continue;
+ storage->set(properties, Smi::FromInt(i));
properties++;
- storage->set(details.dictionary_index() - 1, k);
if (properties == length) break;
}
}
-
- // There are holes in the enumeration array if less properties were assigned
- // than the length of the array. If so, crunch all the existing properties
- // together by shifting them to the left (maintaining the enumeration order),
- // and trimming of the right side of the array.
- if (properties < length) {
- if (properties == 0) return heap->empty_fixed_array();
- properties = 0;
- for (int i = 0; i < length; ++i) {
- Object* value = storage->get(i);
- if (value != undefined_value) {
- storage->set(properties, value);
- ++properties;
- }
- }
- RightTrimFixedArray<FROM_MUTATOR>(heap, storage, length - properties);
+ EnumIndexComparator cmp(this);
+ Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ int index = Smi::cast(storage->get(i))->value();
+ storage->set(i, KeyAt(index));
}
- return storage;
}
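// The rewritten CopyEnumKeysTo above no longer scatters each key at its
// enumeration index and compacts the holes afterwards; it records the
// dictionary slot of every enumerable key, sorts those slots by
// dictionary_index(), and then overwrites each slot number with its key.
// The same idea in stand-alone C++ (IsEnumerable is a hypothetical helper;
// the diff itself uses the EnumIndexComparator functor over Smi-encoded
// slots instead of a lambda):
//
//   std::vector<int> slots;
//   for (int i = 0; i < dict->Capacity(); i++) {
//     if (IsEnumerable(dict, i)) slots.push_back(i);
//   }
//   std::sort(slots.begin(), slots.end(), [&](int a, int b) {
//     return dict->DetailsAt(a).dictionary_index() <
//            dict->DetailsAt(b).dictionary_index();
//   });
//   for (size_t i = 0; i < slots.size(); i++) {
//     storage->set(static_cast<int>(i), dict->KeyAt(slots[i]));
//   }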
@@ -15607,12 +15482,11 @@ void Dictionary<Shape, Key>::CopyKeysTo(
int index,
PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
- ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(NONE)));
+ ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
int capacity = HashTable<Shape, Key>::Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = HashTable<Shape, Key>::KeyAt(i);
- if (HashTable<Shape, Key>::IsKey(k)) {
+ if (HashTable<Shape, Key>::IsKey(k) && !FilterKey(k, filter)) {
PropertyDetails details = DetailsAt(i);
if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
@@ -16445,6 +16319,65 @@ void JSTypedArray::Neuter() {
}
+static ElementsKind FixedToExternalElementsKind(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ default:
+ UNREACHABLE();
+ return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ }
+}
+
+
+Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array) {
+
+ Handle<Map> map(typed_array->map());
+ Isolate* isolate = typed_array->GetIsolate();
+
+ ASSERT(IsFixedTypedArrayElementsKind(map->elements_kind()));
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<FixedTypedArrayBase> fixed_typed_array(
+ FixedTypedArrayBase::cast(typed_array->elements()));
+ Runtime::SetupArrayBufferAllocatingData(isolate, buffer,
+ fixed_typed_array->DataSize(), false);
+ memcpy(buffer->backing_store(),
+ fixed_typed_array->DataPtr(),
+ fixed_typed_array->DataSize());
+ Handle<ExternalArray> new_elements =
+ isolate->factory()->NewExternalArray(
+ fixed_typed_array->length(), typed_array->type(),
+ static_cast<uint8_t*>(buffer->backing_store()));
+ Handle<Map> new_map = JSObject::GetElementsTransitionMap(
+ typed_array,
+ FixedToExternalElementsKind(map->elements_kind()));
+
+ buffer->set_weak_first_view(*typed_array);
+ ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value());
+ typed_array->set_buffer(*buffer);
+ typed_array->set_map_and_elements(*new_map, *new_elements);
+
+ return buffer;
+}
+
+
+Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
+ Handle<Object> result(buffer(), GetIsolate());
+ if (*result != Smi::FromInt(0)) {
+ ASSERT(IsExternalArrayElementsKind(map()->elements_kind()));
+ return Handle<JSArrayBuffer>::cast(result);
+ }
+ Handle<JSTypedArray> self(this);
+ return MaterializeArrayBuffer(self);
+}
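+// GetBuffer above makes ArrayBuffer materialization lazy: a typed array
+// allocated with on-heap (FIXED_*) elements keeps Smi 0 in its buffer field
+// until the buffer is first requested, at which point MaterializeArrayBuffer
+// copies the data out to an external backing store and switches the elements
+// kind to the matching EXTERNAL_* kind. A hedged usage sketch (variable
+// names are illustrative):
+//
+//   Handle<JSTypedArray> typed_array = ...;       // may have on-heap data
+//   Handle<JSArrayBuffer> buffer = typed_array->GetBuffer();  // copies once
+//   void* data = buffer->backing_store();         // always external now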
+
+
HeapType* PropertyCell::type() {
return static_cast<HeapType*>(type_raw());
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 1b4075250..e3ed08c4d 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -37,7 +37,9 @@
#include "property-details.h"
#include "smart-pointers.h"
#include "unicode-inl.h"
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/constants-arm64.h"
+#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
@@ -932,7 +934,6 @@ class MaybeObject BASE_EMBEDDED {
public:
inline bool IsFailure();
inline bool IsRetryAfterGC();
- inline bool IsOutOfMemory();
inline bool IsException();
INLINE(bool IsTheHole());
INLINE(bool IsUninitialized());
@@ -1038,7 +1039,6 @@ class MaybeObject BASE_EMBEDDED {
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
- V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
V(ConstantPoolArray) \
@@ -1129,6 +1129,9 @@ class MaybeObject BASE_EMBEDDED {
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
+ V(kCopyBuffersOverlap, "Copy buffers overlap") \
+ V(kCouldNotGenerateZero, "Could not generate +0.0") \
+ V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerIsActive, "Debugger is active") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
@@ -1141,18 +1144,34 @@ class MaybeObject BASE_EMBEDDED {
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
+ V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
- V(kExpectedAlignmentMarker, "expected alignment marker") \
- V(kExpectedAllocationSite, "expected allocation site") \
- V(kExpectedPropertyCellInRegisterA2, \
- "Expected property cell in register a2") \
- V(kExpectedPropertyCellInRegisterEbx, \
- "Expected property cell in register ebx") \
- V(kExpectedPropertyCellInRegisterRbx, \
- "Expected property cell in register rbx") \
+ V(kExpectedAlignmentMarker, "Expected alignment marker") \
+ V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedFunctionObject, "Expected function object in register") \
+ V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedNativeContext, "Expected native context") \
+ V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
+ V(kExpectedNonNullContext, "Expected non-null context") \
+ V(kExpectedPositiveZero, "Expected +0.0") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
+ V(kExpectedFixedArrayInFeedbackVector, \
+ "Expected fixed array in feedback vector") \
+ V(kExpectedFixedArrayInRegisterA2, \
+ "Expected fixed array in register a2") \
+ V(kExpectedFixedArrayInRegisterEbx, \
+ "Expected fixed array in register ebx") \
+ V(kExpectedFixedArrayInRegisterR2, \
+ "Expected fixed array in register r2") \
+ V(kExpectedFixedArrayInRegisterRbx, \
+ "Expected fixed array in register rbx") \
+ V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
+ V(kExpectedUndefinedOrCell, \
+ "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, \
"Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
@@ -1197,6 +1216,7 @@ class MaybeObject BASE_EMBEDDED {
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
+ V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
@@ -1210,6 +1230,7 @@ class MaybeObject BASE_EMBEDDED {
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
@@ -1222,7 +1243,10 @@ class MaybeObject BASE_EMBEDDED {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnARM64, \
+ "LiveEdit frame dropping is not supported on arm64") \
V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
"LiveEdit frame dropping is not supported on arm") \
V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
@@ -1258,6 +1282,7 @@ class MaybeObject BASE_EMBEDDED {
"Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
"Oddball in string table is not undefined or the hole") \
+ V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
@@ -1273,6 +1298,7 @@ class MaybeObject BASE_EMBEDDED {
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
+ V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
@@ -1282,24 +1308,40 @@ class MaybeObject BASE_EMBEDDED {
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
+ V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
+ V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
"SwitchStatement: mixed or non-literal switch labels") \
V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheCurrentStackPointerIsBelowCsp, \
+ "The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromPc, \
"The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeALoadFromPp, \
+ "The instruction to patch should be a load from pp") \
+ V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
+ "The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
+ V(kTheSourceAndDestinationAreTheSame, \
+ "The source and destination are the same") \
+ V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
+ V(kTheStackWasCorruptedByMacroAssemblerCall, \
+ "The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
@@ -1308,10 +1350,12 @@ class MaybeObject BASE_EMBEDDED {
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
@@ -1338,16 +1382,20 @@ class MaybeObject BASE_EMBEDDED {
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
+ V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
+ V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
@@ -1487,6 +1535,8 @@ class Object : public MaybeObject {
// Converts this to a Smi if possible.
// Failure is returned otherwise.
+ static MUST_USE_RESULT inline Handle<Object> ToSmi(Isolate* isolate,
+ Handle<Object> object);
MUST_USE_RESULT inline MaybeObject* ToSmi();
void Lookup(Name* name, LookupResult* result);
@@ -1530,16 +1580,20 @@ class Object : public MaybeObject {
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter);
- static Handle<Object> GetElement(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
- uint32_t index);
+ static inline Handle<Object> GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
// For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
- Object* receiver,
- uint32_t index);
+ static inline Handle<Object> GetElementNoExceptionThrown(
+ Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+
+ static Handle<Object> GetElementWithReceiver(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> receiver,
+ uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype(Isolate* isolate);
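A caller-side sketch of the handlified element getters declared above (illustrative only, not part of this patch; the helper name GetFirstElement is hypothetical). The point of the change is that callers now pass the Isolate and a Handle<Object> to a static method instead of unwrapping a raw MaybeObject* at every call site:

    // Hypothetical caller, built only on the declarations above.
    Handle<Object> GetFirstElement(Isolate* isolate, Handle<Object> receiver) {
      // Allocation failures and exceptions surface through the handle-based
      // API rather than through a raw failure object the caller must check.
      return Object::GetElement(isolate, receiver, 0);
    }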
@@ -1675,15 +1729,11 @@ class Failure: public MaybeObject {
inline AllocationSpace allocation_space() const;
inline bool IsInternalError() const;
- inline bool IsOutOfMemoryException() const;
static inline Failure* RetryAfterGC(AllocationSpace space);
static inline Failure* RetryAfterGC(); // NEW_SPACE
static inline Failure* Exception();
static inline Failure* InternalError();
- // TODO(jkummerow): The value is temporary instrumentation. Remove it
- // when it has served its purpose.
- static inline Failure* OutOfMemoryException(intptr_t value);
// Casting.
static inline Failure* cast(MaybeObject* object);
@@ -1848,6 +1898,8 @@ class HeapObject: public Object {
inline void IteratePointers(ObjectVisitor* v, int start, int end);
// as above, for the single element at "offset"
inline void IteratePointer(ObjectVisitor* v, int offset);
+ // as above, for the next code link of a code object.
+ inline void IterateNextCodeLink(ObjectVisitor* v, int offset);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
@@ -1998,14 +2050,14 @@ class JSReceiver: public HeapObject {
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode =
MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
@@ -2031,13 +2083,23 @@ class JSReceiver: public HeapObject {
// function that was used to instantiate the object).
String* constructor_name();
- inline PropertyAttributes GetPropertyAttribute(Name* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
- Name* name);
- PropertyAttributes GetLocalPropertyAttribute(Name* name);
+ static inline PropertyAttributes GetPropertyAttribute(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
+ static PropertyAttributes GetPropertyAttributeWithReceiver(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetLocalPropertyAttribute(
+ Handle<JSReceiver> object,
+ Handle<Name> name);
- inline PropertyAttributes GetElementAttribute(uint32_t index);
- inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
+ static inline PropertyAttributes GetElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
+ static inline PropertyAttributes GetLocalElementAttribute(
+ Handle<JSReceiver> object,
+ uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2068,17 +2130,19 @@ class JSReceiver: public HeapObject {
Handle<Object> value);
private:
- PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
- LookupResult* result,
- Name* name,
- bool continue_search);
+ static PropertyAttributes GetPropertyAttributeForResult(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> name,
+ bool continue_search);
static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
LookupResult* result,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_from_keyed);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
@@ -2110,14 +2174,14 @@ class JSObject: public JSReceiver {
// In the fast mode elements is a FixedArray and so each element can
// be quickly accessed. This fact is used in the generated code. The
// elements array can have one of three maps in this mode:
- // fixed_array_map, non_strict_arguments_elements_map or
+ // fixed_array_map, sloppy_arguments_elements_map or
// fixed_cow_array_map (for copy-on-write arrays). In the latter case
// the elements array may be shared by a few objects and so before
// writing to any element the array must be copied. Use
// EnsureWritableFastElements in this case.
//
// In the slow mode the elements is either a NumberDictionary, an
- // ExternalArray, or a FixedArray parameter map for a (non-strict)
+ // ExternalArray, or a FixedArray parameter map for a (sloppy)
// arguments object.
DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
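The comment above describes the fast and slow element representations; the sketch below is an illustrative classification helper (not part of the patch), built only on predicates that appear in the hunks just below:

    // Rough classification of a JSObject's element backing store.
    const char* DescribeElements(JSObject* obj) {
      if (obj->HasSloppyArgumentsElements()) return "parameter map (sloppy arguments)";
      if (obj->HasDictionaryElements())      return "number dictionary (slow mode)";
      if (obj->HasFixedTypedArrayElements()) return "fixed typed array";
      return "FixedArray / FixedDoubleArray (fast mode)";
    }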
@@ -2139,7 +2203,7 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
// ElementsKind.
inline bool HasFastHoleyElements();
- inline bool HasNonStrictArgumentsElements();
+ inline bool HasSloppyArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalUint8ClampedElements();
@@ -2155,6 +2219,17 @@ class JSObject: public JSReceiver {
inline bool HasFixedTypedArrayElements();
+ inline bool HasFixedUint8ClampedElements();
+ inline bool HasFixedArrayElements();
+ inline bool HasFixedInt8Elements();
+ inline bool HasFixedUint8Elements();
+ inline bool HasFixedInt16Elements();
+ inline bool HasFixedUint16Elements();
+ inline bool HasFixedInt32Elements();
+ inline bool HasFixedUint32Elements();
+ inline bool HasFixedFloat32Elements();
+ inline bool HasFixedFloat64Elements();
+
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
@@ -2191,14 +2266,14 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyWithInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyForResult(
Handle<JSObject> object,
@@ -2206,7 +2281,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
@@ -2240,12 +2315,12 @@ class JSObject: public JSReceiver {
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- Object* GetNormalizedProperty(LookupResult* result);
+ Object* GetNormalizedProperty(const LookupResult* result);
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
static void SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
+ const LookupResult* result,
Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
@@ -2262,20 +2337,26 @@ class JSObject: public JSReceiver {
InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- Name* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
+ static PropertyAttributes GetPropertyAttributePostInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetPropertyAttributeWithInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> receiver,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
+ bool continue_search);
+ static PropertyAttributes GetElementAttributeWithReceiver(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
bool continue_search);
- PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
// Retrieves an AccessorPair property from the given object. Might return
// undefined if the property doesn't exist or is of a different kind.
@@ -2316,10 +2397,6 @@ class JSObject: public JSReceiver {
// been modified since it was created. May give false positives.
bool IsDirty();
- // If the receiver is a JSGlobalProxy this method will return its prototype,
- // otherwise the result is the receiver itself.
- inline Object* BypassGlobalProxy();
-
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
@@ -2343,7 +2420,7 @@ class JSObject: public JSReceiver {
static void DeleteHiddenProperty(Handle<JSObject> object,
Handle<Name> key);
// Returns true if the object has a property with the hidden string as name.
- bool HasHiddenProperties();
+ static bool HasHiddenProperties(Handle<JSObject> object);
static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
@@ -2353,20 +2430,26 @@ class JSObject: public JSReceiver {
static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
Object** elements,
uint32_t count,
EnsureElementsMode mode);
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
- FixedArrayBase* elements,
+ static inline void EnsureCanContainElements(
+ Handle<JSObject> object,
+ Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
- MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
+ static void EnsureCanContainElements(
+ Handle<JSObject> object,
Arguments* arguments,
uint32_t first_arg,
uint32_t arg_count,
EnsureElementsMode mode);
+ // Would we convert a fast elements array to dictionary mode given
+ // an access at key?
+ bool WouldConvertToSlowElements(Handle<Object> key);
// Do we want to keep the elements in fast case when increasing the
// capacity?
bool ShouldConvertToSlowElements(int new_capacity);
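The two predicates above express a heuristic rather than a hard rule: a keyed store far beyond the current capacity, or growth that would leave the array mostly holes, is a hint to fall back to dictionary elements. A standalone sketch of that style of check follows; the threshold constant is hypothetical and not V8's actual value:

    // Illustrative only: a large gap between the store index and the current
    // capacity suggests converting to dictionary (slow) elements.
    bool WouldGoSlowSketch(unsigned index, unsigned capacity) {
      const unsigned kGapThresholdSketch = 1024;  // hypothetical threshold
      return index >= capacity && index - capacity > kGapThresholdSketch;
    }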
@@ -2392,13 +2475,13 @@ class JSObject: public JSReceiver {
static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype);
static Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
static Handle<Object> SetElement(
@@ -2406,14 +2489,15 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true,
SetPropertyMode set_mode = SET_PROPERTY);
// Returns the index'th element.
// The undefined object if index is out of bounds.
- MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
- uint32_t index);
+ static Handle<Object> GetElementWithInterceptor(Handle<JSObject> object,
+ Handle<Object> receiver,
+ uint32_t index);
enum SetFastElementsCapacitySmiMode {
kAllowSmiElements,
@@ -2421,15 +2505,11 @@ class JSObject: public JSReceiver {
kDontAllowSmiElements
};
- static Handle<FixedArray> SetFastElementsCapacityAndLength(
- Handle<JSObject> object,
- int capacity,
- int length,
- SetFastElementsCapacitySmiMode smi_mode);
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
- MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+ static Handle<FixedArray> SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode);
@@ -2505,8 +2585,6 @@ class JSObject: public JSReceiver {
static void TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
-
// TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
static void GeneralizeFieldRepresentation(Handle<JSObject> object,
@@ -2527,8 +2605,6 @@ class JSObject: public JSReceiver {
static Handle<SeededNumberDictionary> NormalizeElements(
Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* NormalizeElements();
-
// Transform slow named properties to fast variants.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
@@ -2600,9 +2676,10 @@ class JSObject: public JSReceiver {
void PrintTransitions(FILE* out = stdout);
#endif
- void PrintElementsTransition(
- FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
- ElementsKind to_kind, FixedArrayBase* to_elements);
+ static void PrintElementsTransition(
+ FILE* file, Handle<JSObject> object,
+ ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
+ ElementsKind to_kind, Handle<FixedArrayBase> to_elements);
void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
@@ -2699,7 +2776,6 @@ class JSObject: public JSReceiver {
static void UpdateAllocationSite(Handle<JSObject> object,
ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
// Used from Object::GetProperty().
static Handle<Object> GetPropertyWithFailedAccessCheck(
@@ -2713,12 +2789,14 @@ class JSObject: public JSReceiver {
Object* structure,
uint32_t index,
Object* holder);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
- JSReceiver* receiver,
+ static PropertyAttributes GetElementAttributeWithInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver,
+ static PropertyAttributes GetElementAttributeWithoutInterceptor(
+ Handle<JSObject> object,
+ Handle<JSReceiver> receiver,
uint32_t index,
bool continue_search);
static Handle<Object> SetElementWithCallback(
@@ -2727,13 +2805,13 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
Handle<JSObject> holder,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetElementWithInterceptor(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
static Handle<Object> SetElementWithoutInterceptor(
@@ -2741,7 +2819,7 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
static Handle<Object> SetElementWithCallbackSetterInPrototypes(
@@ -2749,20 +2827,20 @@ class JSObject: public JSReceiver {
uint32_t index,
Handle<Object> value,
bool* found,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetDictionaryElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype,
SetPropertyMode set_mode = SET_PROPERTY);
static Handle<Object> SetFastDoubleElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool check_prototype = true);
// Searches the prototype chain for property 'name'. If it is found and
@@ -2774,14 +2852,14 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
static Handle<Object> SetPropertyPostInterceptor(
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetPropertyUsingTransition(
Handle<JSObject> object,
LookupResult* lookup,
@@ -2794,7 +2872,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
bool check_prototype,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
// Add a property to an object.
static Handle<Object> AddProperty(
@@ -2802,7 +2880,7 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
ValueType value_type = OPTIMAL_REPRESENTATION,
@@ -2830,15 +2908,6 @@ class JSObject: public JSReceiver {
ValueType value_type,
TransitionFlag flag);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- static void AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> new_map,
- Handle<Name> name,
- Handle<Object> value,
- int field_index,
- Representation representation);
-
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2875,7 +2944,7 @@ class JSObject: public JSReceiver {
// Gets the current elements capacity and the number of used elements.
void GetElementsCapacityAndUsage(int* capacity, int* used);
- bool CanSetCallback(Name* name);
+ static bool CanSetCallback(Handle<JSObject> object, Handle<Name> name);
static void SetElementCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> structure,
@@ -3068,6 +3137,8 @@ class FixedDoubleArray: public FixedArrayBase {
inline double get_scalar(int index);
inline int64_t get_representation(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
+ // TODO(ishell): Rename as get() once all usages handlified.
+ inline Handle<Object> get_as_handle(int index);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -3114,29 +3185,35 @@ class FixedDoubleArray: public FixedArrayBase {
// ConstantPoolArray describes a fixed-sized array containing constant pool
// entries.
// The format of the pool is:
-// [0]: Field holding the first index which is a pointer entry
-// [1]: Field holding the first index which is a int32 entry
-// [2] ... [first_ptr_index() - 1]: 64 bit entries
-// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
-// [first_int32_index()] ... [length - 1]: 32 bit entries
+// [0]: Field holding the first index which is a raw code target pointer entry
+// [1]: Field holding the first index which is a heap pointer entry
+// [2]: Field holding the first index which is an int32 entry
+// [3] ... [first_code_ptr_index() - 1] : 64 bit entries
+// [first_code_ptr_index()] ... [first_heap_ptr_index() - 1] : code pointers
+// [first_heap_ptr_index()] ... [first_int32_index() - 1] : heap pointers
+// [first_int32_index()] ... [length - 1] : 32 bit entries
class ConstantPoolArray: public FixedArrayBase {
public:
// Getters for the field storing the first index for different type entries.
- inline int first_ptr_index();
+ inline int first_code_ptr_index();
+ inline int first_heap_ptr_index();
inline int first_int64_index();
inline int first_int32_index();
// Getters for counts of different type entries.
- inline int count_of_ptr_entries();
+ inline int count_of_code_ptr_entries();
+ inline int count_of_heap_ptr_entries();
inline int count_of_int64_entries();
inline int count_of_int32_entries();
// Setter and getter for pool elements.
- inline Object* get_ptr_entry(int index);
+ inline Address get_code_ptr_entry(int index);
+ inline Object* get_heap_ptr_entry(int index);
inline int64_t get_int64_entry(int index);
inline int32_t get_int32_entry(int index);
inline double get_int64_entry_as_double(int index);
+ inline void set(int index, Address value);
inline void set(int index, Object* value);
inline void set(int index, int64_t value);
inline void set(int index, double value);
@@ -3144,7 +3221,8 @@ class ConstantPoolArray: public FixedArrayBase {
// Set up initial state.
inline void SetEntryCounts(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries);
// Copy operations
@@ -3152,10 +3230,12 @@ class ConstantPoolArray: public FixedArrayBase {
// Garbage collection support.
inline static int SizeFor(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
return RoundUp(OffsetAt(number_of_int64_entries,
- number_of_ptr_entries,
+ number_of_code_ptr_entries,
+ number_of_heap_ptr_entries,
number_of_int32_entries),
kPointerSize);
}
@@ -3164,22 +3244,33 @@ class ConstantPoolArray: public FixedArrayBase {
inline int OffsetOfElementAt(int index) {
ASSERT(index < length());
if (index >= first_int32_index()) {
- return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
- index - first_int32_index());
- } else if (index >= first_ptr_index()) {
- return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+ count_of_heap_ptr_entries(), index - first_int32_index());
+ } else if (index >= first_heap_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+ index - first_heap_ptr_index(), 0);
+ } else if (index >= first_code_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), index - first_code_ptr_index(),
+ 0, 0);
} else {
- return OffsetAt(index, 0, 0);
+ return OffsetAt(index, 0, 0, 0);
}
}
// Casting.
static inline ConstantPoolArray* cast(Object* obj);
+ // Garbage collection support.
+ Object** RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+ }
+
// Layout description.
- static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstCodePointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstHeapPointerIndexOffset =
+ kFirstCodePointerIndexOffset + kPointerSize;
static const int kFirstInt32IndexOffset =
- kFirstPointerIndexOffset + kPointerSize;
+ kFirstHeapPointerIndexOffset + kPointerSize;
static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
// Dispatched behavior.
@@ -3189,15 +3280,18 @@ class ConstantPoolArray: public FixedArrayBase {
DECLARE_VERIFIER(ConstantPoolArray)
private:
- inline void set_first_ptr_index(int value);
+ inline void set_first_code_ptr_index(int value);
+ inline void set_first_heap_ptr_index(int value);
inline void set_first_int32_index(int value);
inline static int OffsetAt(int number_of_int64_entries,
- int number_of_ptr_entries,
+ int number_of_code_ptr_entries,
+ int number_of_heap_ptr_entries,
int number_of_int32_entries) {
return kFirstOffset
+ (number_of_int64_entries * kInt64Size)
- + (number_of_ptr_entries * kPointerSize)
+ + (number_of_code_ptr_entries * kPointerSize)
+ + (number_of_heap_ptr_entries * kPointerSize)
+ (number_of_int32_entries * kInt32Size);
}
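A worked example of the offset arithmetic above, as a standalone program (the element sizes are the usual 64-bit values and kFirstOffset is a placeholder, not the constant defined in this header):

    #include <cstdio>

    int main() {
      const int kInt64Size = 8, kPointerSize = 8, kInt32Size = 4;
      const int kFirstOffset = 16;  // placeholder for the real header constant
      // 2 int64 entries, 1 code pointer, 3 heap pointers, 4 int32 entries:
      int end = kFirstOffset + 2 * kInt64Size + 1 * kPointerSize +
                3 * kPointerSize + 4 * kInt32Size;
      std::printf("%d\n", end);  // 16 + 16 + 8 + 24 + 16 = 80; SizeFor then
                                 // rounds this up to a multiple of kPointerSize
      return 0;
    }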
@@ -3958,7 +4052,7 @@ class NameDictionary: public Dictionary<NameDictionaryShape, Name*> {
}
// Copies enumerable keys to preallocated fixed array.
- FixedArray* CopyEnumKeysTo(FixedArray* storage);
+ void CopyEnumKeysTo(FixedArray* storage);
static void DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
@@ -4272,13 +4366,11 @@ class ScopeInfo : public FixedArray {
// Does this scope call eval?
bool CallsEval();
- // Return the language mode of this scope.
- LanguageMode language_mode();
+ // Return the strict mode of this scope.
+ StrictMode strict_mode();
- // Does this scope make a non-strict eval call?
- bool CallsNonStrictEval() {
- return CallsEval() && (language_mode() == CLASSIC_MODE);
- }
+ // Does this scope make a sloppy eval call?
+ bool CallsSloppyEval() { return CallsEval() && strict_mode() == SLOPPY; }
// Return the total number of locals allocated on the stack and in the
// context. This includes the parameters that are allocated in the context.
@@ -4452,9 +4544,9 @@ class ScopeInfo : public FixedArray {
// Properties of scopes.
class ScopeTypeField: public BitField<ScopeType, 0, 3> {};
class CallsEvalField: public BitField<bool, 3, 1> {};
- class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
- class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
- class FunctionVariableMode: public BitField<VariableMode, 8, 3> {};
+ class StrictModeField: public BitField<StrictMode, 4, 1> {};
+ class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
+ class FunctionVariableMode: public BitField<VariableMode, 7, 3> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
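The flags above are packed with V8's BitField template; the following standalone sketch (a simplified stand-in, not the real utils.h implementation) shows the same packing for the one-bit StrictModeField at shift 4:

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T v) { return static_cast<uint32_t>(v) << shift; }
      static T decode(uint32_t v) { return static_cast<T>((v & kMask) >> shift); }
    };

    enum StrictModeSketch { SLOPPY_SKETCH = 0, STRICT_SKETCH = 1 };

    int main() {
      typedef BitFieldSketch<StrictModeSketch, 4, 1> StrictModeField;
      uint32_t flags = StrictModeField::encode(STRICT_SKETCH);
      assert(StrictModeField::decode(flags) == STRICT_SKETCH);
      return 0;
    }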
@@ -4879,6 +4971,11 @@ class FixedTypedArrayBase: public FixedArrayBase {
inline int size();
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
+ inline int DataSize();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
};
@@ -4905,6 +5002,9 @@ class FixedTypedArray: public FixedTypedArrayBase {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, ElementType value);
+ static inline ElementType from_int(int value);
+ static inline ElementType from_double(double value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4927,7 +5027,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
static const char* Designator() { return #type " array"; } \
static inline MaybeObject* ToObject(Heap* heap, elementType scalar); \
- static elementType defaultValue() { return 0; } \
+ static inline elementType defaultValue(); \
}; \
\
typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
@@ -4951,7 +5051,9 @@ class DeoptimizationInputData: public FixedArray {
static const int kLiteralArrayIndex = 2;
static const int kOsrAstIdIndex = 3;
static const int kOsrPcOffsetIndex = 4;
- static const int kFirstDeoptEntryIndex = 5;
+ static const int kOptimizationIdIndex = 5;
+ static const int kSharedFunctionInfoIndex = 6;
+ static const int kFirstDeoptEntryIndex = 7;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -4974,6 +5076,8 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
#undef DEFINE_ELEMENT_ACCESSORS
@@ -5069,49 +5173,6 @@ class DeoptimizationOutputData: public FixedArray {
// Forward declaration.
class Cell;
class PropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-// [i * 2]: Global property cell of ith cache cell.
-// [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
- int CellCount() { return length() / 2; }
- static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
- // Accessors for AST ids associated with cache values.
- inline TypeFeedbackId AstId(int index);
- inline void SetAstId(int index, TypeFeedbackId id);
-
- // Accessors for global property cells holding the cache values.
- inline Cell* GetCell(int index);
- inline void SetCell(int index, Cell* cell);
-
- // The object that indicates an uninitialized cache.
- static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
- // The object that indicates a megamorphic state.
- static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
- // The object that indicates a monomorphic state of Array with
- // ElementsKind
- static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind);
-
- // A raw version of the uninitialized sentinel that's safe to read during
- // garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
-
- // Casting.
- static inline TypeFeedbackCells* cast(Object* obj);
-
- static const int kForInFastCaseMarker = 0;
- static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
class SafepointEntry;
class TypeFeedbackInfo;
@@ -5192,7 +5253,6 @@ class Code: public HeapObject {
// the kind of the code object.
// FUNCTION => type feedback information.
// STUB => various things, e.g. a SMI
- // OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
DECL_ACCESSORS(raw_type_feedback_info, Object)
inline Object* type_feedback_info();
inline void set_type_feedback_info(
@@ -5230,24 +5290,10 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline Kind handler_kind() {
- return static_cast<Kind>(arguments_count());
- }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline ExtraICState extended_extra_ic_state(); // Only valid for
- // non-call IC stubs.
- static bool needs_extended_extra_ic_state(Kind kind) {
- // TODO(danno): This is a bit of a hack right now since there are still
- // clients of this API that pass "extra" values in for argc. These clients
- // should be retrofitted to used ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == BINARY_OP_IC;
- }
-
inline StubType type(); // Only valid for monomorphic IC stubs.
- inline int arguments_count(); // Only valid for call IC stubs.
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
@@ -5262,6 +5308,7 @@ class Code: public HeapObject {
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
inline bool is_keyed_stub();
+ inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
@@ -5348,7 +5395,6 @@ class Code: public HeapObject {
// Find an object in a stub with a specified map
Object* FindNthObject(int n, Map* match_map);
- void ReplaceNthObject(int n, Map* match_map, Object* replace_with);
// Find the first allocation site in an IC stub.
AllocationSite* FindFirstAllocationSite();
@@ -5357,7 +5403,6 @@ class Code: public HeapObject {
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
void FindAllTypes(TypeHandleList* types);
- void ReplaceFirstMap(Map* replace);
// Find the first handler in an IC stub.
Code* FindFirstHandler();
@@ -5369,7 +5414,12 @@ class Code: public HeapObject {
// Find the first name in an IC stub.
Name* FindFirstName();
- void ReplaceNthCell(int n, Cell* replace_with);
+ class FindAndReplacePattern;
+ // For each (map-to-find, object-to-replace) pair in the pattern, this
+ // function replaces the corresponding placeholder in the code with the
+ // object-to-replace. The function assumes that pairs in the pattern come in
+ // the same order as the placeholders in the code.
+ void FindAndReplace(const FindAndReplacePattern& pattern);
// The entire code object including its header is copied verbatim to the
// snapshot so that it can be written in one, fast, memcpy during
@@ -5386,23 +5436,24 @@ class Code: public HeapObject {
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
StubType type = NORMAL,
- int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
+ StubType type = NORMAL);
+
+ static inline Flags ComputeHandlerFlags(
+ Kind handler_kind,
StubType type = NORMAL,
- int argc = -1);
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags);
- static inline int ExtractArgumentsCountFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
@@ -5472,7 +5523,7 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearInlineCaches(Kind kind);
- void ClearTypeFeedbackCells(Heap* heap);
+ void ClearTypeFeedbackInfo(Heap* heap);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
@@ -5516,7 +5567,11 @@ class Code: public HeapObject {
void VerifyEmbeddedObjectsDependency();
#endif
- static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+ inline bool IsWeakObject(Object* object) {
+ return is_optimized_code() && IsWeakObjectInOptimizedCode(object);
+ }
+
+ inline bool IsWeakObjectInOptimizedCode(Object* object);
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -5530,8 +5585,8 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
- static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
- static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
static const int kFlagsOffset = kICAgeOffset + kIntSize;
@@ -5567,10 +5622,8 @@ class Code: public HeapObject {
class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
class KindField: public BitField<Kind, 6, 4> {};
// TODO(bmeurer): Bit 10 is available for free use. :-)
- class ExtraICStateField: public BitField<ExtraICState, 11, 6> {};
- class ExtendedExtraICStateField: public BitField<ExtraICState, 11,
+ class ExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
- STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -5624,26 +5677,16 @@ class Code: public HeapObject {
class BackEdgesPatchedForOSRField: public BitField<bool,
kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
- // Signed field cannot be encoded using the BitField class.
- static const int kArgumentsCountShift = 17;
- static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
- static const int kArgumentsBits =
- PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1;
+ static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
- // ICs can use either argument count or ExtendedExtraIC, since their storage
- // overlaps.
- STATIC_ASSERT(ExtraICStateField::kShift +
- ExtraICStateField::kSize + kArgumentsBits ==
- ExtendedExtraICStateField::kShift +
- ExtendedExtraICStateField::kSize);
-
// This constant should be encodable in an ARM instruction.
static const int kFlagsNotUsedInLookup =
TypeField::kMask | CacheHolderField::kMask;
private:
friend class RelocIterator;
+ friend class Deoptimizer; // For FindCodeAgeSequence.
void ClearInlineCaches(Kind* kind);
@@ -5926,8 +5969,8 @@ class Map: public HeapObject {
return IsFastElementsKind(elements_kind());
}
- inline bool has_non_strict_arguments_elements() {
- return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ inline bool has_sloppy_arguments_elements() {
+ return elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
inline bool has_external_array_elements() {
@@ -5944,7 +5987,7 @@ class Map: public HeapObject {
inline bool has_slow_elements_kind() {
return elements_kind() == DICTIONARY_ELEMENTS
- || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ || elements_kind() == SLOPPY_ARGUMENTS_ELEMENTS;
}
static bool IsValidElementsTransition(ElementsKind from_kind,
@@ -6203,8 +6246,11 @@ class Map: public HeapObject {
Descriptor* descriptor,
int index,
TransitionFlag flag);
+
MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
+ static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
+
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
@@ -6528,9 +6574,6 @@ class Script: public Struct {
// extracted.
DECL_ACCESSORS(column_offset, Smi)
- // [data]: additional data associated with this script.
- DECL_ACCESSORS(data, Object)
-
// [context_data]: context data for the context this script was compiled in.
DECL_ACCESSORS(context_data, Object)
@@ -6584,8 +6627,7 @@ class Script: public Struct {
static const int kNameOffset = kSourceOffset + kPointerSize;
static const int kLineOffsetOffset = kNameOffset + kPointerSize;
static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
- static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
- static const int kContextOffset = kDataOffset + kPointerSize;
+ static const int kContextOffset = kColumnOffsetOffset + kPointerSize;
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
@@ -6643,7 +6685,9 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf
+ kMathPowHalf,
+ // Installed only on --harmony-maths.
+ kMathClz32
};
@@ -6701,13 +6745,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kLiteralsOffset = 2;
static const int kOsrAstIdOffset = 3;
static const int kEntryLength = 4;
- static const int kFirstContextSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kContextOffset) * kPointerSize;
- static const int kFirstCodeSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kCachedCodeOffset) * kPointerSize;
- static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
- (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
- static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
static const int kInitialLength = kEntriesStart + kEntryLength;
// [scope_info]: Scope info.
@@ -6926,20 +6963,9 @@ class SharedFunctionInfo: public HeapObject {
// spending time attempting to optimize it again.
DECL_BOOLEAN_ACCESSORS(optimization_disabled)
- // Indicates the language mode of the function's code as defined by the
- // current harmony drafts for the next ES language standard. Possible
- // values are:
- // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
- // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
- // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
- inline LanguageMode language_mode();
- inline void set_language_mode(LanguageMode language_mode);
-
- // Indicates whether the language mode of this function is CLASSIC_MODE.
- inline bool is_classic_mode();
-
- // Indicates whether the language mode of this function is EXTENDED_MODE.
- inline bool is_extended_mode();
+ // Indicates the language mode.
+ inline StrictMode strict_mode();
+ inline void set_strict_mode(StrictMode strict_mode);
// False if the function definitely does not allocate an arguments object.
DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -7188,7 +7214,6 @@ class SharedFunctionInfo: public HeapObject {
kLiveObjectsMayExist,
kOptimizationDisabled,
kStrictModeFunction,
- kExtendedModeFunction,
kUsesArguments,
kHasDuplicateParameters,
kNative,
@@ -7233,26 +7258,18 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kExtendedModeBitWithinByte =
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kExtendedModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
@@ -7399,9 +7416,6 @@ class JSFunction: public JSObject {
void MarkForConcurrentOptimization();
void MarkInOptimizationQueue();
- static bool CompileOptimized(Handle<JSFunction> function,
- ClearExceptionFlag flag);
-
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForOptimization();
@@ -7803,9 +7817,6 @@ class JSMessageObject: public JSObject {
// [script]: the script from which the error message originated.
DECL_ACCESSORS(script, Object)
- // [stack_trace]: the stack trace for this error message.
- DECL_ACCESSORS(stack_trace, Object)
-
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
@@ -7828,8 +7839,7 @@ class JSMessageObject: public JSObject {
static const int kTypeOffset = JSObject::kHeaderSize;
static const int kArgumentsOffset = kTypeOffset + kPointerSize;
static const int kScriptOffset = kArgumentsOffset + kPointerSize;
- static const int kStackTraceOffset = kScriptOffset + kPointerSize;
- static const int kStackFramesOffset = kStackTraceOffset + kPointerSize;
+ static const int kStackFramesOffset = kScriptOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
static const int kSize = kEndPositionOffset + kPointerSize;
@@ -8010,7 +8020,7 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape,
Object* Lookup(String* src, Context* context);
Object* LookupEval(String* src,
Context* context,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position);
Object* LookupRegExp(String* source, JSRegExp::Flags flags);
MUST_USE_RESULT MaybeObject* Put(String* src,
@@ -8188,7 +8198,7 @@ class TypeFeedbackInfo: public Struct {
inline void set_inlined_type_change_checksum(int checksum);
inline bool matches_inlined_type_change_checksum(int checksum);
- DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
+ DECL_ACCESSORS(feedback_vector, FixedArray)
static inline TypeFeedbackInfo* cast(Object* obj);
@@ -8198,8 +8208,27 @@ class TypeFeedbackInfo: public Struct {
static const int kStorage1Offset = HeapObject::kHeaderSize;
static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kTypeFeedbackCellsOffset = kStorage2Offset + kPointerSize;
- static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
+ static const int kFeedbackVectorOffset =
+ kStorage2Offset + kPointerSize;
+ static const int kSize = kFeedbackVectorOffset + kPointerSize;
+
+ // The object that indicates an uninitialized cache.
+ static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+ // The object that indicates a megamorphic state.
+ static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+ // The object that indicates a monomorphic state of an Array with a
+ // particular ElementsKind.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
+ // A raw version of the uninitialized sentinel that's safe to read during
+ // garbage collection (e.g., for patching the cache).
+ static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ static const int kForInFastCaseMarker = 0;
+ static const int kForInSlowCaseMarker = 1;
private:
static const int kTypeChangeChecksumBits = 7;
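The sentinel accessors moved onto TypeFeedbackInfo above replace the ones that previously lived on TypeFeedbackCells. A hedged sketch of how a feedback-vector slot might be classified against them; the helper and its enum are hypothetical, only the sentinel getters come from the declarations above:

    // Illustrative only, built on the static sentinel getters declared above.
    enum SlotStateSketch { kUninitializedSlot, kMegamorphicSlot, kMonomorphicSlot };

    SlotStateSketch ClassifySlot(Isolate* isolate, Handle<Object> slot) {
      if (*slot == *TypeFeedbackInfo::UninitializedSentinel(isolate))
        return kUninitializedSlot;
      if (*slot == *TypeFeedbackInfo::MegamorphicSentinel(isolate))
        return kMegamorphicSlot;
      return kMonomorphicSlot;  // any other value is treated as monomorphic feedback
    }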
@@ -8262,8 +8291,9 @@ class AllocationSite: public Struct {
class DoNotInlineBit: public BitField<bool, 29, 1> {};
// Bitfields for pretenure_data
- class MementoFoundCountBits: public BitField<int, 0, 28> {};
- class PretenureDecisionBits: public BitField<PretenureDecision, 28, 2> {};
+ class MementoFoundCountBits: public BitField<int, 0, 27> {};
+ class PretenureDecisionBits: public BitField<PretenureDecision, 27, 2> {};
+ class DeoptDependentCodeBit: public BitField<bool, 29, 1> {};
STATIC_ASSERT(PretenureDecisionBits::kMax >= kLastPretenureDecisionValue);
// Increments the mementos found counter and returns true when the first
@@ -8288,6 +8318,18 @@ class AllocationSite: public Struct {
SKIP_WRITE_BARRIER);
}
+ bool deopt_dependent_code() {
+ int value = pretenure_data()->value();
+ return DeoptDependentCodeBit::decode(value);
+ }
+
+ void set_deopt_dependent_code(bool deopt) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
+ SKIP_WRITE_BARRIER);
+ }
+
int memento_found_count() {
int value = pretenure_data()->value();
return MementoFoundCountBits::decode(value);
@@ -8345,7 +8387,8 @@ class AllocationSite: public Struct {
return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
- MaybeObject* DigestTransitionFeedback(ElementsKind to_kind);
+ static void DigestTransitionFeedback(Handle<AllocationSite> site,
+ ElementsKind to_kind);
enum Reason {
TENURING,
@@ -8421,8 +8464,8 @@ class AllocationMemento: public Struct {
};
-// Representation of a slow alias as part of a non-strict arguments objects.
-// For fast aliases (if HasNonStrictArgumentsElements()):
+// Representation of a slow alias as part of a sloppy arguments object.
+// For fast aliases (if HasSloppyArgumentsElements()):
// - the parameter map contains an index into the context
// - all attributes of the element have default values
// For slow aliases (if HasDictionaryArgumentsElements()):
@@ -8627,7 +8670,7 @@ class Name: public HeapObject {
// kMaxCachedArrayIndexLength.
STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
- static const int kContainsCachedArrayIndexMask =
+ static const unsigned int kContainsCachedArrayIndexMask =
(~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
kIsNotArrayIndexMask;
@@ -8876,7 +8919,7 @@ class String: public Name {
static const int kEmptyStringHash = kIsNotArrayIndexMask;
// Maximal string length.
- static const int kMaxLength = (1 << (32 - 2)) - 1;
+ static const int kMaxLength = (1 << 28) - 16;
// Max length for computing hash. For strings longer than this limit the
// string length is used as the hash value.
@@ -9038,9 +9081,7 @@ class SeqOneByteString: public SeqString {
// Maximal memory usage for a single sequential ASCII string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential ASCII string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize);
+ STATIC_CHECK((kMaxSize - kHeaderSize) >= String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
@@ -9080,9 +9121,8 @@ class SeqTwoByteString: public SeqString {
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxSize = 512 * MB - 1;
- // Maximal length of a single sequential two-byte string.
- // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
- static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
+ STATIC_CHECK(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >=
+ String::kMaxLength);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
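The STATIC_CHECKs above record why the new String::kMaxLength value is safe: both one-byte and two-byte sequential strings must still fit within their 512 MB kMaxSize. A standalone arithmetic check (the header size here is a placeholder, not the real kHeaderSize):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int MB = 1024 * 1024;
      const int kMaxSize = 512 * MB - 1;      // per sequential string
      const int kHeaderSize = 24;             // placeholder value
      const int kMaxLength = (1 << 28) - 16;  // new String::kMaxLength
      assert(kMaxSize - kHeaderSize >= kMaxLength);              // one byte per char
      assert((kMaxSize - kHeaderSize) / static_cast<int>(sizeof(uint16_t)) >=
             kMaxLength);                                        // two bytes per char
      return 0;
    }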
@@ -9600,14 +9640,16 @@ class JSProxy: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
bool* done);
- MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
- JSReceiver* receiver,
- Name* name);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
- JSReceiver* receiver,
+ static PropertyAttributes GetPropertyAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name);
+ static PropertyAttributes GetElementAttributeWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
uint32_t index);
// Turn the proxy into an (empty) JSObject.
@@ -9651,12 +9693,12 @@ class JSProxy: public JSReceiver {
Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
@@ -9898,6 +9940,8 @@ class JSTypedArray: public JSArrayBufferView {
ExternalArrayType type();
size_t element_size();
+ Handle<JSArrayBuffer> GetBuffer();
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
@@ -9909,6 +9953,9 @@ class JSTypedArray: public JSArrayBufferView {
kSize + v8::ArrayBufferView::kInternalFieldCount * kPointerSize;
private:
+ static Handle<JSArrayBuffer> MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
};
@@ -9993,22 +10040,30 @@ class JSArray: public JSObject {
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
+ static void Initialize(Handle<JSArray> array, int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+ static Handle<Object> SetElementsLength(Handle<JSArray> array,
+ Handle<Object> length);
// Set the content of the array to the content of storage.
- MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
+ static inline void SetContent(Handle<JSArray> array,
+ Handle<FixedArrayBase> storage);
// Casting.
static inline JSArray* cast(Object* obj);
- // Uses handles. Ensures that the fixed array backing the JSArray has at
+ // Ensures that the fixed array backing the JSArray has at
// least the stated size.
- inline void EnsureSize(int minimum_size_of_backing_fixed_array);
+ static inline void EnsureSize(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
+
+ // Expand the fixed array backing of a fast-case JSArray to at least
+ // the requested size.
+ static void Expand(Handle<JSArray> array,
+ int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
DECLARE_PRINTER(JSArray)
@@ -10022,10 +10077,6 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
private:
- // Expand the fixed array backing of a fast-case JSArray to at least
- // the requested size.
- void Expand(int minimum_size_of_backing_fixed_array);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
@@ -10646,6 +10697,7 @@ class BreakPointInfo: public Struct {
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSmiRootList, "smi_root_list", "(Smi roots)") \
V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
@@ -10685,6 +10737,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ // Visit weak next_code_link in Code object.
+ virtual void VisitNextCodeLink(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index d21507084..fb3eac5d5 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -258,9 +258,13 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
BackEdgeTable::RemoveStackCheck(code, offset);
} else {
- Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
- function->ReplaceCode(
- code.is_null() ? function->shared()->code() : *code);
+ if (function->IsOptimized()) {
+ DisposeOptimizedCompileJob(job, false);
+ } else {
+ Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+ function->ReplaceCode(
+ code.is_null() ? function->shared()->code() : *code);
+ }
}
}
}
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5e7680e6c..a00adb8c1 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -33,7 +33,6 @@
#include "char-predicates-inl.h"
#include "codegen.h"
#include "compiler.h"
-#include "func-name-inferrer.h"
#include "messages.h"
#include "parser.h"
#include "platform.h"
@@ -46,49 +45,6 @@
namespace v8 {
namespace internal {
-// PositionStack is used for on-stack allocation of token positions for
-// new expressions. Please look at ParseNewExpression.
-
-class PositionStack {
- public:
- explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() {
- ASSERT(!*ok_ || is_empty());
- USE(ok_);
- }
-
- class Element {
- public:
- Element(PositionStack* stack, int value) {
- previous_ = stack->top();
- value_ = value;
- stack->set_top(this);
- }
-
- private:
- Element* previous() { return previous_; }
- int value() { return value_; }
- friend class PositionStack;
- Element* previous_;
- int value_;
- };
-
- bool is_empty() { return top_ == NULL; }
- int pop() {
- ASSERT(!is_empty());
- int result = top_->value();
- top_ = top_->previous();
- return result;
- }
-
- private:
- Element* top() { return top_; }
- void set_top(Element* value) { top_ = value; }
- Element* top_;
- bool* ok_;
-};
-
-
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
@@ -249,25 +205,6 @@ void RegExpBuilder::AddQuantifierToAtom(
}
-Handle<String> Parser::LookupSymbol(int symbol_id) {
- // Length of symbol cache is the number of identified symbols.
- // If we are larger than that, or negative, it's not a cached symbol.
- // This might also happen if there is no preparser symbol data, even
- // if there is some preparser data.
- if (static_cast<unsigned>(symbol_id)
- >= static_cast<unsigned>(symbol_cache_.length())) {
- if (scanner().is_literal_ascii()) {
- return isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- return isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
- }
- return LookupCachedSymbol(symbol_id);
-}
-
-
Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
// Make sure the cache is large enough to hold the symbol identifier.
if (symbol_cache_.length() <= symbol_id) {
@@ -277,13 +214,8 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
}
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
- if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->InternalizeOneByteString(
- Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
- } else {
- result = isolate()->factory()->InternalizeTwoByteString(
- scanner().literal_utf16_string());
- }
+ result = scanner()->AllocateInternalizedString(isolate_);
+ ASSERT(!result.is_null());
symbol_cache_.at(symbol_id) = result;
return result;
}
@@ -463,54 +395,6 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_. The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop. They are also used to hold the parser's per-function and per-block
-// state.
-
-class Parser::BlockState BASE_EMBEDDED {
- public:
- BlockState(Parser* parser, Scope* scope)
- : parser_(parser),
- outer_scope_(parser->top_scope_) {
- parser->top_scope_ = scope;
- }
-
- ~BlockState() { parser_->top_scope_ = outer_scope_; }
-
- private:
- Parser* parser_;
- Scope* outer_scope_;
-};
-
-
-Parser::FunctionState::FunctionState(Parser* parser, Scope* scope)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
- next_handler_index_(0),
- expected_property_count_(0),
- generator_object_variable_(NULL),
- parser_(parser),
- outer_function_state_(parser->current_function_state_),
- outer_scope_(parser->top_scope_),
- saved_ast_node_id_(parser->zone()->isolate()->ast_node_id()),
- factory_(parser->zone()) {
- parser->top_scope_ = scope;
- parser->current_function_state_ = this;
- parser->zone()->isolate()->set_ast_node_id(BailoutId::FirstUsable().ToInt());
-}
-
-
-Parser::FunctionState::~FunctionState() {
- parser_->top_scope_ = outer_scope_;
- parser_->current_function_state_ = outer_function_state_;
- if (outer_function_state_ != NULL) {
- parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
- }
-}
-
-
-// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
//
@@ -533,22 +417,371 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
+bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
+ return identifier.is_identical_to(
+ parser_->isolate()->factory()->eval_string()) ||
+ identifier.is_identical_to(
+ parser_->isolate()->factory()->arguments_string());
+}
+
+
+bool ParserTraits::IsThisProperty(Expression* expression) {
+ ASSERT(expression != NULL);
+ Property* property = expression->AsProperty();
+ return property != NULL &&
+ property->obj()->AsVariableProxy() != NULL &&
+ property->obj()->AsVariableProxy()->is_this();
+}
+
+
+bool ParserTraits::IsIdentifier(Expression* expression) {
+ VariableProxy* operand = expression->AsVariableProxy();
+ return operand != NULL && !operand->is_this();
+}
+
+
+void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
+ Expression* expression) {
+ if (expression->IsPropertyName()) {
+ fni->PushLiteralName(expression->AsLiteral()->AsPropertyName());
+ } else {
+ fni->PushLiteralName(
+ parser_->isolate()->factory()->anonymous_function_string());
+ }
+}
+
+
+void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right) {
+ ASSERT(left != NULL);
+ if (left->AsProperty() != NULL &&
+ right->AsFunctionLiteral() != NULL) {
+ right->AsFunctionLiteral()->set_pretenure();
+ }
+}
+
+
+void ParserTraits::CheckPossibleEvalCall(Expression* expression,
+ Scope* scope) {
+ VariableProxy* callee = expression->AsVariableProxy();
+ if (callee != NULL &&
+ callee->IsVariable(parser_->isolate()->factory()->eval_string())) {
+ scope->DeclarationScope()->RecordEvalCall();
+ }
+}
+
+
+Expression* ParserTraits::MarkExpressionAsLValue(Expression* expression) {
+ VariableProxy* proxy = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+ if (proxy != NULL) proxy->MarkAsLValue();
+ return expression;
+}
+
+
+void ParserTraits::CheckStrictModeLValue(Expression* expression,
+ bool* ok) {
+ VariableProxy* lhs = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+ if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
+ parser_->ReportMessage("strict_eval_arguments",
+ Vector<const char*>::empty());
+ *ok = false;
+ }
+}
+
+
+bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if ((*x)->AsLiteral() && (*x)->AsLiteral()->value()->IsNumber() &&
+ y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
+ double x_val = (*x)->AsLiteral()->value()->Number();
+ double y_val = y->AsLiteral()->value()->Number();
+ switch (op) {
+ case Token::ADD:
+ *x = factory->NewNumberLiteral(x_val + y_val, pos);
+ return true;
+ case Token::SUB:
+ *x = factory->NewNumberLiteral(x_val - y_val, pos);
+ return true;
+ case Token::MUL:
+ *x = factory->NewNumberLiteral(x_val * y_val, pos);
+ return true;
+ case Token::DIV:
+ *x = factory->NewNumberLiteral(x_val / y_val, pos);
+ return true;
+ case Token::BIT_OR: {
+ int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_AND: {
+ int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::BIT_XOR: {
+ int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHL: {
+ int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SHR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ uint32_t value = DoubleToUint32(x_val) >> shift;
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ case Token::SAR: {
+ uint32_t shift = DoubleToInt32(y_val) & 0x1f;
+ int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
+ *x = factory->NewNumberLiteral(value, pos);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+
+Expression* ParserTraits::BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ ASSERT(expression != NULL);
+ if (expression->AsLiteral() != NULL) {
+ Handle<Object> literal = expression->AsLiteral()->value();
+ if (op == Token::NOT) {
+ // Convert the literal to a boolean condition and negate it.
+ bool condition = literal->BooleanValue();
+ Handle<Object> result =
+ parser_->isolate()->factory()->ToBoolean(!condition);
+ return factory->NewLiteral(result, pos);
+ } else if (literal->IsNumber()) {
+ // Compute some expressions involving only number literals.
+ double value = literal->Number();
+ switch (op) {
+ case Token::ADD:
+ return expression;
+ case Token::SUB:
+ return factory->NewNumberLiteral(-value, pos);
+ case Token::BIT_NOT:
+ return factory->NewNumberLiteral(~DoubleToInt32(value), pos);
+ default:
+ break;
+ }
+ }
+ }
+ // Desugar '+foo' => 'foo*1'
+ if (op == Token::ADD) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(1, pos), pos);
+ }
+ // The same idea for '-foo' => 'foo*(-1)'.
+ if (op == Token::SUB) {
+ return factory->NewBinaryOperation(
+ Token::MUL, expression, factory->NewNumberLiteral(-1, pos), pos);
+ }
+ // ...and one more time for '~foo' => 'foo^(~0)'.
+ if (op == Token::BIT_NOT) {
+ return factory->NewBinaryOperation(
+ Token::BIT_XOR, expression, factory->NewNumberLiteral(~0, pos), pos);
+ }
+ return factory->NewUnaryOperation(op, expression, pos);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
+ ASSERT(!arg_string.is_null());
+ elements->set(i, *arg_string);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = is_reference_error
+ ? factory->NewReferenceError(message, array)
+ : factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+void ParserTraits::ReportMessage(const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error) {
+ Scanner::Location source_location = parser_->scanner()->location();
+ ReportMessageAt(source_location, message, args, is_reference_error);
+}
+
+
+void ParserTraits::ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error) {
+ if (parser_->stack_overflow()) {
+ // Suppress the error message (syntax error or such) in the presence of a
+ // stack overflow. The isolate allows only one pending exception at a time
+ // and we want to report the stack overflow later.
+ return;
+ }
+ MessageLocation location(parser_->script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Factory* factory = parser_->isolate()->factory();
+ Handle<FixedArray> elements = factory->NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ elements->set(i, *args[i]);
+ }
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> result = is_reference_error
+ ? factory->NewReferenceError(message, array)
+ : factory->NewSyntaxError(message, array);
+ parser_->isolate()->Throw(*result, &location);
+}
+
+
+Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
+ if (parser_->cached_data_mode() == CONSUME_CACHED_DATA) {
+ int symbol_id = (*parser_->cached_data())->GetSymbolIdentifier();
+ // If there is no symbol data, -1 will be returned.
+ if (symbol_id >= 0 &&
+ symbol_id < (*parser_->cached_data())->symbol_count()) {
+ return parser_->LookupCachedSymbol(symbol_id);
+ }
+ } else if (parser_->cached_data_mode() == PRODUCE_CACHED_DATA) {
+ if (parser_->log_->ShouldLogSymbols()) {
+ parser_->scanner()->LogSymbol(parser_->log_, parser_->position());
+ }
+ }
+ Handle<String> result =
+ parser_->scanner()->AllocateInternalizedString(parser_->isolate_);
+ ASSERT(!result.is_null());
+ return result;
+}
+
+
+Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return scanner->AllocateNextLiteralString(parser_->isolate(), tenured);
+}
+
+
+Expression* ParserTraits::ThisExpression(
+ Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewVariableProxy(scope->receiver());
+}
+
+
+Literal* ParserTraits::ExpressionFromLiteral(
+ Token::Value token, int pos,
+ Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Factory* isolate_factory = parser_->isolate()->factory();
+ switch (token) {
+ case Token::NULL_LITERAL:
+ return factory->NewLiteral(isolate_factory->null_value(), pos);
+ case Token::TRUE_LITERAL:
+ return factory->NewLiteral(isolate_factory->true_value(), pos);
+ case Token::FALSE_LITERAL:
+ return factory->NewLiteral(isolate_factory->false_value(), pos);
+ case Token::NUMBER: {
+ double value = scanner->DoubleValue();
+ return factory->NewNumberLiteral(value, pos);
+ }
+ default:
+ ASSERT(false);
+ }
+ return NULL;
+}
+
+
+Expression* ParserTraits::ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
+ // The name may refer to a module instance object, so its type is unknown.
+#ifdef DEBUG
+ if (FLAG_print_interface_details)
+ PrintF("# Variable %s ", name->ToAsciiArray());
+#endif
+ Interface* interface = Interface::NewUnknown(parser_->zone());
+ return scope->NewUnresolved(factory, name, interface, pos);
+}
+
+
+Expression* ParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory) {
+ Handle<String> symbol = GetSymbol(scanner);
+ if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
+ return factory->NewLiteral(symbol, pos);
+}
+
+
+Literal* ParserTraits::GetLiteralTheHole(
+ int position, AstNodeFactory<AstConstructionVisitor>* factory) {
+ return factory->NewLiteral(parser_->isolate()->factory()->the_hole_value(),
+ RelocInfo::kNoPosition);
+}
+
+
+Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
+ return parser_->ParseV8Intrinsic(ok);
+}
+
+
+FunctionLiteral* ParserTraits::ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok) {
+ return parser_->ParseFunctionLiteral(name, function_name_location,
+ name_is_strict_reserved, is_generator,
+ function_token_position, type, ok);
+}
+
+
Parser::Parser(CompilationInfo* info)
- : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ : ParserBase<ParserTraits>(&scanner_,
+ info->isolate()->stack_guard()->real_climit(),
+ info->extension(),
+ NULL,
+ info->zone(),
+ this),
isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
- top_scope_(NULL),
original_scope_(NULL),
- current_function_state_(NULL),
target_stack_(NULL),
- extension_(info->extension()),
- pre_parse_data_(NULL),
- fni_(NULL),
- parenthesized_function_(false),
- zone_(info->zone()),
+ cached_data_(NULL),
+ cached_data_mode_(NO_CACHED_DATA),
info_(info) {
ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
@@ -575,6 +808,13 @@ FunctionLiteral* Parser::ParseProgram() {
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
+ CompleteParserRecorder recorder;
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ log_ = &recorder;
+ } else if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ (*cached_data_)->Initialize();
+ }
+
source->TryFlatten();
FunctionLiteral* result;
if (source->IsExternalTwoByteString()) {
@@ -604,27 +844,31 @@ FunctionLiteral* Parser::ParseProgram() {
}
PrintF(" - took %0.3f ms]\n", ms);
}
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ Vector<unsigned> store = recorder.ExtractData();
+ *cached_data_ = new ScriptDataImpl(store);
+ log_ = NULL;
+ }
return result;
}
FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
Handle<String> source) {
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
- if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
- { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
original_scope_ = scope;
if (info->is_eval()) {
- if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+ if (!scope->is_global_scope() || info->strict_mode() == STRICT) {
scope = NewScope(scope, EVAL_SCOPE);
}
} else if (info->is_global()) {
@@ -643,19 +887,19 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ParsingModeScope parsing_mode(this, mode);
// Enters 'scope'.
- FunctionState function_state(this, scope);
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
- top_scope_->SetLanguageMode(info->language_mode());
+ scope_->SetStrictMode(info->strict_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_pos = scanner().location().beg_pos;
+ int beg_pos = scanner()->location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
- if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
+ if (ok && strict_mode() == STRICT) {
+ CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(top_scope_, &ok);
+ if (ok && allow_harmony_scoping() && strict_mode() == STRICT) {
+ CheckConflictingVarDeclarations(scope_, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -671,7 +915,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (ok) {
result = factory()->NewFunctionLiteral(
no_name,
- top_scope_,
+ scope_,
body,
function_state.materialized_literal_count(),
function_state.expected_property_count(),
@@ -684,6 +928,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotGenerator,
0);
result->set_ast_properties(factory()->visitor()->ast_properties());
+ result->set_slot_processor(factory()->visitor()->slot_processor());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow()) {
@@ -736,7 +981,7 @@ FunctionLiteral* Parser::ParseLazy() {
FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
scanner_.Initialize(source);
- ASSERT(top_scope_ == NULL);
+ ASSERT(scope_ == NULL);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
@@ -750,19 +995,17 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
{
// Parse the function literal.
- Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
+ Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
info()->SetGlobalScope(scope);
if (!info()->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
original_scope_ = scope;
- FunctionState function_state(this, scope);
- ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
- ASSERT(scope->language_mode() != EXTENDED_MODE ||
- info()->is_extended_mode());
- ASSERT(info()->language_mode() == shared_info->language_mode());
- scope->SetLanguageMode(shared_info->language_mode());
+ FunctionState function_state(&function_state_, &scope_, scope, zone());
+ ASSERT(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
+ ASSERT(info()->strict_mode() == shared_info->strict_mode());
+ scope->SetStrictMode(shared_info->strict_mode());
FunctionLiteral::FunctionType function_type = shared_info->is_expression()
? (shared_info->is_anonymous()
? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -793,62 +1036,6 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
}
-Handle<String> Parser::GetSymbol() {
- int symbol_id = -1;
- if (pre_parse_data() != NULL) {
- symbol_id = pre_parse_data()->GetSymbolIdentifier();
- }
- return LookupSymbol(symbol_id);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<const char*> args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessage(const char* message, Vector<Handle<String> > args) {
- Scanner::Location source_location = scanner().location();
- ReportMessageAt(source_location, message, args);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- Handle<String> arg_string = factory->NewStringFromUtf8(CStrVector(args[i]));
- elements->set(i, *arg_string);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
-void Parser::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<Handle<String> > args) {
- MessageLocation location(script_,
- source_location.beg_pos,
- source_location.end_pos);
- Factory* factory = isolate()->factory();
- Handle<FixedArray> elements = factory->NewFixedArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- elements->set(i, *args[i]);
- }
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
-}
-
-
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
bool is_eval,
@@ -871,7 +1058,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
directive_prologue = false;
}
- Scanner::Location token_loc = scanner().peek_location();
+ Scanner::Location token_loc = scanner()->peek_location();
Statement* stat;
if (is_global && !is_eval) {
stat = ParseModuleElement(NULL, CHECK_OK);
@@ -894,7 +1081,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
Handle<String> directive = Handle<String>::cast(literal->value());
// Check "use strict" directive (ES5 14.1).
- if (top_scope_->is_classic_mode() &&
+ if (strict_mode() == SLOPPY &&
directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict_string()->length() + 2) {
@@ -903,17 +1090,15 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
// In the same manner, we have to patch the parsing mode.
- if (is_eval && !top_scope_->is_eval_scope()) {
- ASSERT(top_scope_->is_global_scope());
- Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
- scope->set_start_position(top_scope_->start_position());
- scope->set_end_position(top_scope_->end_position());
- top_scope_ = scope;
+ if (is_eval && !scope_->is_eval_scope()) {
+ ASSERT(scope_->is_global_scope());
+ Scope* scope = NewScope(scope_, EVAL_SCOPE);
+ scope->set_start_position(scope_->start_position());
+ scope->set_end_position(scope_->end_position());
+ scope_ = scope;
mode_ = PARSE_EAGERLY;
}
- // TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(allow_harmony_scoping()
- ? EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
// "use strict" is the only directive for now.
directive_prologue = false;
}
@@ -961,14 +1146,14 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
// Handle 'module' as a context-sensitive keyword.
if (FLAG_harmony_modules &&
peek() == Token::IDENTIFIER &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
stmt != NULL) {
ExpressionStatement* estmt = stmt->AsExpressionStatement();
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
}
@@ -993,7 +1178,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewModuleDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1051,14 +1236,14 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Expect(Token::LBRACE, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
- scope->SetLanguageMode(EXTENDED_MODE);
+ scope->set_start_position(scanner()->location().beg_pos);
+ scope->SetStrictMode(STRICT);
{
- BlockState block_state(this, scope);
+ BlockState block_state(&scope_, scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1072,7 +1257,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
}
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
body->set_scope(scope);
// Check that all exports are bound.
@@ -1081,8 +1266,8 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
!it.done(); it.Advance()) {
if (scope->LocalLookup(it.name()) == NULL) {
Handle<String> name(it.name());
- ReportMessage("module_export_undefined",
- Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_export_undefined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -1121,7 +1306,8 @@ Module* Parser::ParseModulePath(bool* ok) {
member->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
result = member;
@@ -1141,9 +1327,9 @@ Module* Parser::ParseModuleVariable(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Module variable %s ", name->ToAsciiArray());
#endif
- VariableProxy* proxy = top_scope_->NewUnresolved(
+ VariableProxy* proxy = scope_->NewUnresolved(
factory(), name, Interface::NewModule(zone()),
- scanner().location().beg_pos);
+ scanner()->location().beg_pos);
return factory()->NewModuleVariable(proxy, pos);
}
@@ -1165,7 +1351,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Scope* scope = NewScope(scope_, MODULE_SCOPE);
Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
@@ -1231,12 +1417,13 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
module->interface()->Print();
}
#endif
- ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("invalid_module_path",
+ Vector<Handle<String> >(&name, 1));
return NULL;
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
+ factory()->NewImportDeclaration(proxy, module, scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1291,12 +1478,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
default:
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
+ ReportUnexpectedToken(scanner()->current_token());
return NULL;
}
// Extract declared names into export declarations and interface.
- Interface* interface = top_scope_->interface();
+ Interface* interface = scope_->interface();
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1311,8 +1498,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_, position);
- // top_scope_->AddDeclaration(declaration);
+ // factory()->NewExportDeclaration(proxy, scope_, position);
+ // scope_->AddDeclaration(declaration);
}
ASSERT(result != NULL);
@@ -1438,9 +1625,8 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// In Harmony mode, this case also handles the extension:
// Statement:
// GeneratorDeclaration
- if (!top_scope_->is_classic_mode()) {
- ReportMessageAt(scanner().peek_location(), "strict_function",
- Vector<const char*>::empty());
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->peek_location(), "strict_function");
*ok = false;
return NULL;
}
@@ -1484,7 +1670,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_or_extended_eval_scope() ||
+ declaration_scope->is_strict_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
declaration_scope->is_global_scope()) {
@@ -1517,8 +1703,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
ASSERT(IsDeclaredVariableMode(var->mode()));
- if (is_extended_mode()) {
- // In harmony mode we treat re-declarations as early errors. See
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ // In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
const char* elms[2] = { "Variable", c_string.get() };
@@ -1528,8 +1714,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
return;
}
Handle<String> message_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
- TENURED);
+ isolate()->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Variable"));
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_string(),
message_string, name);
@@ -1552,10 +1738,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// same variable if it is declared several times. This is not a
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
- // Runtime::DeclareContextSlot() calls.
+ // RuntimeHidden_DeclareContextSlot calls.
declaration_scope->AddDeclaration(declaration);
- if (mode == CONST && declaration_scope->is_global_scope()) {
+ if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) {
// For global const variables we bind the proxy to a variable.
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
@@ -1563,8 +1749,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
declaration_scope, name, mode, true, kind,
kNeedsInitialization, proxy->interface());
} else if (declaration_scope->is_eval_scope() &&
- declaration_scope->is_classic_mode()) {
- // For variable declarations in a non-strict eval scope the proxy is bound
+ declaration_scope->strict_mode() == SLOPPY) {
+ // For variable declarations in a sloppy eval scope the proxy is bound
// to a lookup variable to force a dynamic declaration using the
// DeclareContextSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
@@ -1619,7 +1805,8 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
var->interface()->Print();
}
#endif
- ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("module_type_error",
+ Vector<Handle<String> >(&name, 1));
}
}
}
@@ -1658,7 +1845,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
Declare(declaration, true, CHECK_OK);
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
@@ -1682,7 +1869,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
- scanner().location(),
+ scanner()->location(),
is_strict_reserved,
is_generator,
pos,
@@ -1694,10 +1881,11 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// In extended mode, a function behaves as a lexical binding, except in the
// global scope.
VariableMode mode =
- is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
+ allow_harmony_scoping() &&
+ strict_mode() == STRICT && !scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1705,7 +1893,9 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
- if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ return ParseScopedBlock(labels, ok);
+ }
// Block ::
// '{' Statement* '}'
@@ -1738,12 +1928,12 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// Construct block expecting 16 statements.
Block* body =
factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner().location().beg_pos);
- { BlockState block_state(this, block_scope);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ { BlockState block_state(&scope_, block_scope);
TargetCollector collector(zone());
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1756,7 +1946,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner().location().end_pos);
+ block_scope->set_end_position(scanner()->location().end_pos);
block_scope = block_scope->FinalizeBlockScope();
body->set_scope(block_scope);
return body;
@@ -1777,12 +1967,6 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
}
-bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_string()) ||
- string.is_identical_to(isolate()->factory()->arguments_string());
-}
-
-
// If the variable declaration declares exactly one non-const
// variable, then *out is set to that variable. In all other cases,
// *out is untouched; in particular, it is the caller's responsibility
@@ -1827,29 +2011,31 @@ Block* Parser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (top_scope_->language_mode()) {
- case CLASSIC_MODE:
- mode = CONST;
- init_op = Token::INIT_CONST;
+ switch (strict_mode()) {
+ case SLOPPY:
+ mode = CONST_LEGACY;
+ init_op = Token::INIT_CONST_LEGACY;
break;
- case STRICT_MODE:
- ReportMessage("strict_const", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- case EXTENDED_MODE:
- if (var_context == kStatement) {
- // In extended mode 'const' declarations are only allowed in source
- // element positions.
- ReportMessage("unprotected_const", Vector<const char*>::empty());
+ case STRICT:
+ if (allow_harmony_scoping()) {
+ if (var_context == kStatement) {
+ // In strict mode 'const' declarations are only allowed in source
+ // element positions.
+ ReportMessage("unprotected_const", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ mode = CONST;
+ init_op = Token::INIT_CONST;
+ } else {
+ ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
}
- mode = CONST_HARMONY;
- init_op = Token::INIT_CONST_HARMONY;
}
is_const = true;
needs_init = true;
@@ -1860,7 +2046,9 @@ Block* Parser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
ReportMessage("illegal_let", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1924,12 +2112,11 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
- ReportMessageAt(scanner().location(), "too_many_variables",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_variables");
*ok = false;
return NULL;
}
@@ -1944,7 +2131,7 @@ Block* Parser::ParseVariableDeclarations(
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // In particular, we need to re-lookup 'v' (in scope_, not
// declaration_scope) as it may be a different 'v' than the 'v' in the
// declaration (e.g., if we are inside a 'with' statement or 'catch'
// block).
@@ -1962,11 +2149,11 @@ Block* Parser::ParseVariableDeclarations(
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
+ Scope* initialization_scope = is_const ? declaration_scope : scope_;
Expression* value = NULL;
int pos = -1;
// Harmony consts have non-optional initializers.
- if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
+ if (peek() == Token::ASSIGN || mode == CONST) {
Expect(Token::ASSIGN, CHECK_OK);
pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -2029,13 +2216,13 @@ Block* Parser::ParseVariableDeclarations(
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeConstGlobal_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+ Runtime::FunctionForId(Runtime::kHiddenInitializeConstGlobal),
arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
+ StrictMode strict_mode = initialization_scope->strict_mode();
+ arguments->Add(factory()->NewNumberLiteral(strict_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2153,7 +2340,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- top_scope_->RemoveUnresolved(var);
+ scope_->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
return ParseStatement(labels, ok);
}
@@ -2163,12 +2350,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// no line-terminator between the two words.
if (extension_ != NULL &&
peek() == Token::FUNCTION &&
- !scanner().HasAnyLineTerminatorBeforeNext() &&
+ !scanner()->HasAnyLineTerminatorBeforeNext() &&
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->native_string()) &&
- !scanner().literal_contains_escapes()) {
+ !scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2176,11 +2363,11 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// Only expect semicolon in the former case.
if (!FLAG_harmony_modules ||
peek() != Token::IDENTIFIER ||
- scanner().HasAnyLineTerminatorBeforeNext() ||
+ scanner()->HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
isolate()->heap()->module_string()) ||
- scanner().literal_contains_escapes()) {
+ scanner()->literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
return factory()->NewExpressionStatement(expr, pos);
@@ -2217,7 +2404,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2232,7 +2419,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2249,7 +2436,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
@@ -2270,7 +2457,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
message = "unknown_label";
args = Vector<Handle<String> >(&label, 1);
}
- ReportMessageAt(scanner().location(), message, args);
+ ParserTraits::ReportMessageAt(scanner()->location(), message, args);
*ok = false;
return NULL;
}
@@ -2292,7 +2479,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Token::Value tok = peek();
Statement* result;
Expression* return_value;
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -2303,7 +2490,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
Expression* generator = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
generator, return_value, Yield::FINAL, pos);
result = factory()->NewExpressionStatement(yield, pos);
@@ -2316,7 +2503,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
Handle<String> message = isolate()->factory()->illegal_return_string();
@@ -2335,7 +2522,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::WITH, CHECK_OK);
int pos = position();
- if (!top_scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2345,13 +2532,13 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- top_scope_->DeclarationScope()->RecordWithStatement();
- Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
+ scope_->DeclarationScope()->RecordWithStatement();
+ Scope* with_scope = NewScope(scope_, WITH_SCOPE);
Statement* stmt;
- { BlockState block_state(this, with_scope);
- with_scope->set_start_position(scanner().peek_location().beg_pos);
+ { BlockState block_state(&scope_, with_scope);
+ with_scope->set_start_position(scanner()->peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
- with_scope->set_end_position(scanner().location().end_pos);
+ with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2425,7 +2612,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner().HasAnyLineTerminatorBeforeNext()) {
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2480,21 +2667,22 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(top_scope_, CATCH_SCOPE);
- catch_scope->set_start_position(scanner().location().beg_pos);
+ catch_scope = NewScope(scope_, CATCH_SCOPE);
+ catch_scope->set_start_position(scanner()->location().beg_pos);
name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Target target(&this->target_stack_, &catch_collector);
- VariableMode mode = is_extended_mode() ? LET : VAR;
+ VariableMode mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
catch_variable =
catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- BlockState block_state(this, catch_scope);
+ BlockState block_state(&scope_, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
- catch_scope->set_end_position(scanner().location().end_pos);
+ catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
}
@@ -2513,7 +2701,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block,
RelocInfo::kNoPosition);
@@ -2527,12 +2715,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
ASSERT(catch_scope != NULL && catch_variable != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
- int index = current_function_state_->NextHandlerIndex();
+ int index = function_state_->NextHandlerIndex();
result = factory()->NewTryFinallyStatement(
index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
@@ -2612,9 +2800,9 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Variable* iterator = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* iterator = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_iterator_string());
- Variable* result = top_scope_->DeclarationScope()->NewTemporary(
+ Variable* result = scope_->DeclarationScope()->NewTemporary(
heap_factory->dot_result_string());
Expression* assign_iterator;
@@ -2681,13 +2869,13 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = top_scope_;
- Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
- top_scope_ = for_scope;
+ Scope* saved_scope = scope_;
+ Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
+ scope_ = for_scope;
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner().location().beg_pos);
+ for_scope->set_start_position(scanner()->location().beg_pos);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
bool is_const = peek() == Token::CONST;
@@ -2710,15 +2898,15 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, interface);
+ scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop w/ variable/const declaration.
@@ -2755,21 +2943,22 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Factory* heap_factory = isolate()->factory();
Handle<String> tempstr =
heap_factory->NewConsString(heap_factory->dot_for_string(), name);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate(), tempstr, 0);
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
- top_scope_ = saved_scope;
+ scope_ = saved_scope;
Expression* enumerable = ParseExpression(true, CHECK_OK);
- top_scope_ = for_scope;
+ scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
VariableProxy* each =
- top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
+ scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -2781,8 +2970,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
body_block->set_scope(for_scope);
// Parsed for-in loop w/ let declaration.
@@ -2792,19 +2981,16 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
init = variable_statement;
}
} else {
+ Scanner::Location lhs_location = scanner()->peek_location();
Expression* expression = ParseExpression(false, CHECK_OK);
ForEachStatement::VisitMode mode;
bool accept_OF = expression->AsVariableProxy();
if (CheckInOrOf(accept_OF, &mode)) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report
- // the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_for_in_string();
- expression = NewThrowReferenceError(message);
+ ReportMessageAt(lhs_location, "invalid_lhs_in_for", true);
+ *ok = false;
+ return NULL;
}
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, pos);
@@ -2815,8 +3001,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, expression, enumerable, body);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
ASSERT(for_scope == NULL);
// Parsed for-in loop.
@@ -2850,8 +3036,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
Statement* body = ParseStatement(NULL, CHECK_OK);
- top_scope_ = saved_scope;
- for_scope->set_end_position(scanner().location().end_pos);
+ scope_ = saved_scope;
+ for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
if (for_scope != NULL) {
// Rewrite a for statement of the form
@@ -2878,581 +3064,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
-// Precedence = 1
-Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
- }
- return result;
-}
-
-
-// Precedence = 2
-Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (peek() == Token::YIELD && is_generator()) {
- return ParseYieldExpression(ok);
- }
-
- if (fni_ != NULL) fni_->Enter();
- Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- if (fni_ != NULL) fni_->Leave();
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- // Signal a reference error if the expression is an invalid left-hand
- // side expression. We could report this as a syntax error here but
- // for compatibility with JSC we choose to report the error at
- // runtime.
- // TODO(ES5): Should change parsing for spec conformance.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_assignment_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Assignment to eval or arguments is disallowed in strict mode.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value op = Next(); // Get assignment operator.
- int pos = position();
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- // TODO(1231235): We try to estimate the set of properties set by
- // constructors. We define a new property whenever there is an
- // assignment to a property of 'this'. We should probably only add
- // properties if we haven't seen them before. Otherwise we'll
- // probably overestimate the number of properties.
- Property* property = expression ? expression->AsProperty() : NULL;
- if (op == Token::ASSIGN &&
- property != NULL &&
- property->obj()->AsVariableProxy() != NULL &&
- property->obj()->AsVariableProxy()->is_this()) {
- current_function_state_->AddProperty();
- }
-
- // If we assign a function literal to a property we pretenure the
- // literal so it can be added as a constant function property.
- if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure();
- }
-
- if (fni_ != NULL) {
- // Check if the right hand side is a call to avoid inferring a
- // name if we're dealing with "a = function(){...}();"-like
- // expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST
- || op == Token::ASSIGN)
- && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
- fni_->Infer();
- } else {
- fni_->RemoveLastFunction();
- }
- fni_->Leave();
- }
-
- return factory()->NewAssignment(op, expression, right, pos);
-}
-
-
-Expression* Parser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- int pos = peek_position();
- Expect(Token::YIELD, CHECK_OK);
- Yield::Kind kind =
- Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
- Expression* generator_object = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
- Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
- if (kind == Yield::DELEGATING) {
- yield->set_index(current_function_state_->NextHandlerIndex());
- }
- return yield;
-}
-
-
-// Precedence = 3
-Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- int pos = peek_position();
- // We start using the binary expression parser for prec >= 4 only!
- Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- Expression* left = ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(expression, left, right, pos);
-}
-
-
-int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
- if (tok == Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return Token::Precedence(tok);
-}
-
-
-// Precedence >= 4
-Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
- ASSERT(prec >= 4);
- Expression* x = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Token::Value op = Next();
- int pos = position();
- Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-
- // Compute some expressions involving only number literals.
- if (x && x->AsLiteral() && x->AsLiteral()->value()->IsNumber() &&
- y && y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
- double x_val = x->AsLiteral()->value()->Number();
- double y_val = y->AsLiteral()->value()->Number();
-
- switch (op) {
- case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val, pos);
- continue;
- case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val, pos);
- continue;
- case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val, pos);
- continue;
- case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val, pos);
- continue;
- case Token::BIT_OR: {
- int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_AND: {
- int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::BIT_XOR: {
- int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHL: {
- int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SHR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- case Token::SAR: {
- uint32_t shift = DoubleToInt32(y_val) & 0x1f;
- int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value, pos);
- continue;
- }
- default:
- break;
- }
- }
-
- // For now we distinguish between comparisons and other binary
- // operations. (We could combine the two and get rid of this
- // code and AST node eventually.)
- if (Token::IsCompareOp(op)) {
- // We have a comparison.
- Token::Value cmp = op;
- switch (op) {
- case Token::NE: cmp = Token::EQ; break;
- case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
- default: break;
- }
- x = factory()->NewCompareOperation(cmp, x, y, pos);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, pos);
- }
-
- } else {
- // We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, pos);
- }
- }
- }
- return x;
-}
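The deleted ParseBinaryExpression is a precedence-climbing parser that also constant-folds operations whose operands are both number literals. Below is a minimal standalone sketch of the same loop shape, using an invented character lexer and toy Precedence() table instead of V8's token machinery (not V8 code; compiles as plain C++11):

#include <cctype>
#include <cstddef>
#include <cstdio>
#include <string>

namespace sketch {

struct Lexer {
  std::string src;
  std::size_t pos = 0;
  char Peek() {
    while (pos < src.size() && std::isspace(static_cast<unsigned char>(src[pos]))) ++pos;
    return pos < src.size() ? src[pos] : '\0';
  }
  char Next() { char c = Peek(); if (c != '\0') ++pos; return c; }
};

// 0 terminates binary expression parsing, as in ParserBase::Precedence.
int Precedence(char op) {
  switch (op) {
    case '+': case '-': return 4;
    case '*': case '/': return 5;
    default: return 0;
  }
}

double ParsePrimary(Lexer* lex) {
  double value = 0;
  while (std::isdigit(static_cast<unsigned char>(lex->Peek())))
    value = value * 10 + (lex->Next() - '0');
  return value;
}

// For each precedence level from the highest one seen down to 'prec',
// consume runs of operators at exactly that level; tighter-binding operands
// are handled by the recursive call with prec1 + 1.
double ParseBinary(Lexer* lex, int prec) {
  double x = ParsePrimary(lex);
  for (int prec1 = Precedence(lex->Peek()); prec1 >= prec; prec1--) {
    while (Precedence(lex->Peek()) == prec1) {
      char op = lex->Next();
      double y = ParseBinary(lex, prec1 + 1);
      switch (op) {  // "Constant fold" the two operands right away.
        case '+': x = x + y; break;
        case '-': x = x - y; break;
        case '*': x = x * y; break;
        case '/': x = x / y; break;
      }
    }
  }
  return x;
}

}  // namespace sketch

int main() {
  sketch::Lexer lex;
  lex.src = "1 + 2 * 3 - 4";
  std::printf("%g\n", sketch::ParseBinary(&lex, 4));  // prints 3
  return 0;
}

The inner while-loop greedily consumes same-precedence operators, which is what keeps left-associativity while the outer for-loop walks the precedence levels down to 'prec'.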
-
-
-Expression* Parser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- int pos = position();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
-
- if (expression != NULL && (expression->AsLiteral() != NULL)) {
- Handle<Object> literal = expression->AsLiteral()->value();
- if (op == Token::NOT) {
- // Convert the literal to a boolean condition and negate it.
- bool condition = literal->BooleanValue();
- Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
- return factory()->NewLiteral(result, pos);
- } else if (literal->IsNumber()) {
- // Compute some expressions involving only number literals.
- double value = literal->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return factory()->NewNumberLiteral(-value, pos);
- case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
- default:
- break;
- }
- }
- }
-
- // "delete identifier" is a syntax error in strict mode.
- if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
- VariableProxy* operand = expression->AsVariableProxy();
- if (operand != NULL && !operand->is_this()) {
- ReportMessage("strict_delete", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- }
-
- // Desugar '+foo' into 'foo*1'; this enables the collection of type feedback
- // without any special stub and the multiplication is removed later in
- // Crankshaft's canonicalization pass.
- if (op == Token::ADD) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(1, pos),
- pos);
- }
- // The same idea for '-foo' => 'foo*(-1)'.
- if (op == Token::SUB) {
- return factory()->NewBinaryOperation(Token::MUL,
- expression,
- factory()->NewNumberLiteral(-1, pos),
- pos);
- }
- // ...and one more time for '~foo' => 'foo^(~0)'.
- if (op == Token::BIT_NOT) {
- return factory()->NewBinaryOperation(Token::BIT_XOR,
- expression,
- factory()->NewNumberLiteral(~0, pos),
- pos);
- }
-
- return factory()->NewUnaryOperation(op, expression, pos);
-
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_prefix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Prefix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- return factory()->NewCountOperation(op,
- true /* prefix */,
- expression,
- position());
-
- } else {
- return ParsePostfixExpression(ok);
- }
-}
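The comments in the deleted ParseUnaryExpression explain why unary '+', '-' and '~' are desugared into binary '* 1', '* (-1)' and '^ ~0': the binary stubs already collect type feedback, and the extra multiplication is removed later by Crankshaft's canonicalization. A small standalone check of the numeric identities that make the rewrite safe for number values (not V8 code; it says nothing about ToNumber coercion of non-number operands):

#include <cassert>
#include <cstdint>

int main() {
  const double samples[] = {0.0, 1.5, -3.25, 1e300};
  for (double d : samples) {
    assert(+d == d * 1.0);    // '+foo'  ~  'foo * 1'
    assert(-d == d * -1.0);   // '-foo'  ~  'foo * (-1)'
  }
  const int32_t ints[] = {0, 1, -1, 42, INT32_MIN};
  for (int32_t i : ints) {
    assert(~i == (i ^ ~0));   // '~foo'  ~  'foo ^ (~0)'
  }
  return 0;
}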
-
-
-Expression* Parser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- // Signal a reference error if the expression is an invalid
- // left-hand side expression. We could report this as a syntax
- // error here but for compatibility with JSC we choose to report the
- // error at runtime.
- if (expression == NULL || !expression->IsValidLeftHandSide()) {
- Handle<String> message =
- isolate()->factory()->invalid_lhs_in_postfix_op_string();
- expression = NewThrowReferenceError(message);
- }
-
- if (!top_scope_->is_classic_mode()) {
- // Postfix expression operand in strict mode may not be eval or arguments.
- CheckStrictModeLValue(expression, CHECK_OK);
- }
- MarkAsLValue(expression);
-
- Token::Value next = Next();
- expression =
- factory()->NewCountOperation(next,
- false /* postfix */,
- expression,
- position());
- }
- return expression;
-}
-
-
-Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
-
- case Token::LPAREN: {
- int pos;
- if (scanner().current_token() == Token::IDENTIFIER) {
- // For call of an identifier we want to report position of
- // the identifier as position of the call in the stack trace.
- pos = position();
- } else {
- // For other kinds of calls we record position of the parenthesis as
- // position of the call. Note that this is extremely important for
- // expressions of the form function(){...}() for which call position
- // should not point to the closing brace otherwise it will intersect
- // with positions recorded for function literal and confuse debugger.
- pos = peek_position();
- // Also the trailing parentheses are a hint that the function will
- // be called immediately. If we happen to have parsed a preceding
- // function literal eagerly, we can also compile it eagerly.
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- result->AsFunctionLiteral()->set_parenthesized();
- }
- }
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
-
- // Keep track of eval() calls since they disable all local variable
- // optimizations.
- // The calls that need special treatment are the
- // direct eval calls. These calls are all of the form eval(...), with
- // no explicit receiver.
- // These calls are marked as potentially direct eval calls. Whether
- // they are actually direct calls to eval is determined at run time.
- VariableProxy* callee = result->AsVariableProxy();
- if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_string())) {
- top_scope_->DeclarationScope()->RecordEvalCall();
- }
- result = factory()->NewCall(result, args, pos);
- if (fni_ != NULL) fni_->RemoveLastFunction();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, position());
-
- Expression* result;
- if (peek() == Token::NEW) {
- result = ParseNewPrefix(stack, CHECK_OK);
- } else {
- result = ParseMemberWithNewPrefixesExpression(stack, CHECK_OK);
- }
-
- if (!stack->is_empty()) {
- int last = stack->pop();
- result = factory()->NewCallNew(
- result, new(zone()) ZoneList<Expression*>(0, zone()), last);
- }
- return result;
-}
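The deleted ParseNewPrefix/ParseNewExpression pair implements the "count the new prefixes" idea from the comment above: 'new' keywords are consumed greedily, each argument list met inside the member expression pairs with the innermost pending 'new', and any leftover prefixes become constructor calls with no arguments. A standalone sketch with an invented token stream (not V8 code):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Toy token stream for inputs like: new new Foo ( )
enum class Tok { kNew, kName, kArgs, kEnd };

struct Stream {
  std::vector<Tok> toks;
  std::size_t pos = 0;
  Tok Peek() const { return pos < toks.size() ? toks[pos] : Tok::kEnd; }
  Tok Next() { Tok t = Peek(); if (t != Tok::kEnd) ++pos; return t; }
};

std::string ParseMemberWithNewPrefixes(Stream* s, int* pending);

std::string ParseNewPrefix(Stream* s, int* pending) {
  s->Next();             // consume 'new' greedily
  ++*pending;            // one more prefix waiting for an argument list
  std::string result = (s->Peek() == Tok::kNew)
      ? ParseNewPrefix(s, pending)
      : ParseMemberWithNewPrefixes(s, pending);
  if (*pending > 0) {    // leftover prefix: a 'new' with no argument list
    --*pending;
    result = "CallNew(" + result + ")";
  }
  return result;
}

std::string ParseMemberWithNewPrefixes(Stream* s, int* pending) {
  s->Next();             // consume the identifier
  std::string result = "Foo";
  // Each argument list seen here consumes the innermost pending 'new'.
  while (s->Peek() == Tok::kArgs && *pending > 0) {
    s->Next();
    --*pending;
    result = "CallNew(" + result + ", args)";
  }
  return result;
}

int main() {
  Stream s;
  s.toks = {Tok::kNew, Tok::kNew, Tok::kName, Tok::kArgs};  // new new Foo()
  int pending = 0;
  // Prints CallNew(CallNew(Foo, args)): the explicit argument list binds to
  // the inner 'new'; the outer 'new' is left with no arguments, matching
  // the grouping new (new Foo()).
  std::printf("%s\n", ParseNewPrefix(&s, &pending).c_str());
  return 0;
}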
-
-
-Expression* Parser::ParseNewExpression(bool* ok) {
- PositionStack stack(ok);
- return ParseNewPrefix(&stack, ok);
-}
-
-
-Expression* Parser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(NULL, ok);
-}
-
-
-Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression* result = NULL;
- if (peek() == Token::FUNCTION) {
- Consume(Token::FUNCTION);
- int function_token_position = position();
- bool is_generator = allow_generators() && Check(Token::MUL);
- Handle<String> name;
- bool is_strict_reserved_name = false;
- Scanner::Location function_name_location = Scanner::Location::invalid();
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- function_name_location = scanner().location();
- }
- FunctionLiteral::FunctionType function_type = name.is_null()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION;
- result = ParseFunctionLiteral(name,
- function_name_location,
- is_strict_reserved_name,
- is_generator,
- function_token_position,
- function_type,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- int pos = position();
- Expression* index = ParseExpression(true, CHECK_OK);
- result = factory()->NewProperty(result, index, pos);
- if (fni_ != NULL) {
- if (index->IsPropertyName()) {
- fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
- } else {
- fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_string());
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- int pos = position();
- Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = factory()->NewProperty(
- result, factory()->NewLiteral(name, pos), pos);
- if (fni_ != NULL) fni_->PushLiteralName(name);
- break;
- }
- case Token::LPAREN: {
- if ((stack == NULL) || stack->is_empty()) return result;
- // Consume one of the new prefixes (already parsed).
- ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int pos = stack->pop();
- result = factory()->NewCallNew(result, args, pos);
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
// contexts this is used as a statement which invokes the debugger as if a
@@ -3476,152 +3087,6 @@ void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
}
-Expression* Parser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- int pos = peek_position();
- Expression* result = NULL;
- switch (peek()) {
- case Token::THIS: {
- Consume(Token::THIS);
- result = factory()->NewVariableProxy(top_scope_->receiver());
- break;
- }
-
- case Token::NULL_LITERAL:
- Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
- break;
-
- case Token::TRUE_LITERAL:
- Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
- break;
-
- case Token::FALSE_LITERAL:
- Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
- break;
-
- case Token::IDENTIFIER:
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- // Using eval or arguments in this context is OK even in strict mode.
- Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- if (fni_ != NULL) fni_->PushVariableName(name);
- // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
- if (FLAG_print_interface_details)
- PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
- Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(factory(), name, interface, pos);
- break;
- }
-
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value, pos);
- break;
- }
-
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol, pos);
- if (fni_ != NULL) fni_->PushLiteralName(symbol);
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- if (allow_natives_syntax() || extension_ != NULL) {
- result = ParseV8Intrinsic(CHECK_OK);
- break;
- }
- // If we're not allowing special syntax we fall through to the
- // default case.
-
- default: {
- Token::Value tok = Next();
- ReportUnexpectedToken(tok);
- *ok = false;
- return NULL;
- }
- }
-
- return result;
-}
-
-
-Expression* Parser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
-
- int pos = peek_position();
- ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- Expression* elem;
- if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole(peek_position());
- } else {
- elem = ParseAssignmentExpression(true, CHECK_OK);
- }
- values->Add(elem, zone());
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- // Update the scope information before the pre-parsing bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewArrayLiteral(values, literal_index, pos);
-}
-
-
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
if (expression->AsLiteral() != NULL) return true;
MaterializedLiteral* lit = expression->AsMaterializedLiteral();
@@ -3665,310 +3130,6 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-Expression* Parser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- int pos = peek_position();
- ZoneList<ObjectLiteral::Property*>* properties =
- new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
- int number_of_boilerplate_properties = 0;
- bool has_function = false;
-
- ObjectLiteralChecker checker(this, top_scope_->language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
-
- while (peek() != Token::RBRACE) {
- if (fni_ != NULL) fni_->Enter();
-
- Literal* key = NULL;
- Token::Value next = peek();
- int next_pos = peek_position();
-
- switch (next) {
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::IDENTIFIER: {
- bool is_getter = false;
- bool is_setter = false;
- Handle<String> id =
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if (fni_ != NULL) fni_->PushLiteralName(id);
-
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next != i::Token::IDENTIFIER &&
- next != i::Token::FUTURE_RESERVED_WORD &&
- next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- next != i::Token::NUMBER &&
- next != i::Token::STRING &&
- !is_keyword) {
- // Unexpected token.
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- // Validate the property.
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(next, type, CHECK_OK);
- Handle<String> name = is_keyword
- ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
- : GetSymbol();
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- scanner().location(),
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibility with JSC.
- // Specification only allows zero parameters for get and one for set.
- ObjectLiteral::Property* property =
- factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
- }
- // Failed to parse as get/set property, so it's just a property
- // called "get" or "set".
- key = factory()->NewLiteral(id, next_pos);
- break;
- }
- case Token::STRING: {
- Consume(Token::STRING);
- Handle<String> string = GetSymbol();
- if (fni_ != NULL) fni_->PushLiteralName(string);
- uint32_t index;
- if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index, next_pos);
- break;
- }
- key = factory()->NewLiteral(string, next_pos);
- break;
- }
- case Token::NUMBER: {
- Consume(Token::NUMBER);
- ASSERT(scanner().is_literal_ascii());
- double value = StringToDouble(isolate()->unicode_cache(),
- scanner().literal_ascii_string(),
- ALLOW_HEX | ALLOW_OCTAL |
- ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- key = factory()->NewNumberLiteral(value, next_pos);
- break;
- }
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- Handle<String> string = GetSymbol();
- key = factory()->NewLiteral(string, next_pos);
- } else {
- // Unexpected token.
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
- }
-
- // Validate the property
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
-
- Expect(Token::COLON, CHECK_OK);
- Expression* value = ParseAssignmentExpression(true, CHECK_OK);
-
- ObjectLiteral::Property* property =
- factory()->NewObjectLiteralProperty(key, value);
-
- // Mark top-level object literals that contain function literals and
- // pretenure the literal so it can be added as a constant function
- // property.
- if (top_scope_->DeclarationScope()->is_global_scope() &&
- value->AsFunctionLiteral() != NULL) {
- has_function = true;
- value->AsFunctionLiteral()->set_pretenure();
- }
-
- // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (ObjectLiteral::IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- properties->Add(property, zone());
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- // Computation of literal_index must happen before pre parse bailout.
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- return factory()->NewObjectLiteral(properties,
- literal_index,
- number_of_boilerplate_properties,
- has_function,
- pos);
-}
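One detail worth noting in the deleted ParseObjectLiteral: an identifier 'get' or 'set' only starts accessor syntax when the following token is not ':'; otherwise it is an ordinary value property that merely happens to be named "get" or "set". A tiny standalone sketch of that decision (not V8 code; the names and char-based lookahead are invented):

#include <cassert>
#include <string>

enum class PropertyKind { kValue, kGetter, kSetter };

PropertyKind ClassifyProperty(const std::string& name, char next_token) {
  const bool is_get = (name == "get");
  const bool is_set = (name == "set");
  if ((is_get || is_set) && next_token != ':') {
    return is_get ? PropertyKind::kGetter : PropertyKind::kSetter;
  }
  return PropertyKind::kValue;  // e.g. "{ get: 1 }" is a plain property
}

int main() {
  assert(ClassifyProperty("get", 'f') == PropertyKind::kGetter);  // get foo() {}
  assert(ClassifyProperty("set", 'f') == PropertyKind::kSetter);  // set foo(v) {}
  assert(ClassifyProperty("get", ':') == PropertyKind::kValue);   // get: 42
  assert(ClassifyProperty("name", ':') == PropertyKind::kValue);  // name: "x"
  return 0;
}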
-
-
-Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
- int pos = peek_position();
- if (!scanner().ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessage("unterminated_regexp", Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
-
- int literal_index = current_function_state_->NextMaterializedLiteralIndex();
-
- Handle<String> js_pattern = NextLiteralString(TENURED);
- scanner().ScanRegExpFlags();
- Handle<String> js_flags = NextLiteralString(TENURED);
- Next();
-
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
-}
-
-
-ZoneList<Expression*>* Parser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4, zone());
- Expect(Token::LPAREN, CHECK_OK);
- bool done = (peek() == Token::RPAREN);
- while (!done) {
- Expression* argument = ParseAssignmentExpression(true, CHECK_OK);
- result->Add(argument, zone());
- if (result->length() > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_arguments",
- Vector<const char*>::empty());
- *ok = false;
- return NULL;
- }
- done = (peek() == Token::RPAREN);
- if (!done) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
- return result;
-}
-
-
-class SingletonLogger : public ParserRecorder {
- public:
- SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
- virtual ~SingletonLogger() { }
-
- void Reset() { has_error_ = false; }
-
- virtual void LogFunction(int start,
- int end,
- int literals,
- int properties,
- LanguageMode mode) {
- ASSERT(!has_error_);
- start_ = start;
- end_ = end;
- literals_ = literals;
- properties_ = properties;
- mode_ = mode;
- };
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt) {
- if (has_error_) return;
- has_error_ = true;
- start_ = start;
- end_ = end;
- message_ = message;
- argument_opt_ = argument_opt;
- }
-
- virtual int function_position() { return 0; }
-
- virtual int symbol_position() { return 0; }
-
- virtual int symbol_ids() { return -1; }
-
- virtual Vector<unsigned> ExtractData() {
- UNREACHABLE();
- return Vector<unsigned>();
- }
-
- virtual void PauseRecording() { }
-
- virtual void ResumeRecording() { }
-
- bool has_error() { return has_error_; }
-
- int start() { return start_; }
- int end() { return end_; }
- int literals() {
- ASSERT(!has_error_);
- return literals_;
- }
- int properties() {
- ASSERT(!has_error_);
- return properties_;
- }
- LanguageMode language_mode() {
- ASSERT(!has_error_);
- return mode_;
- }
- const char* message() {
- ASSERT(has_error_);
- return message_;
- }
- const char* argument_opt() {
- ASSERT(has_error_);
- return argument_opt_;
- }
-
- private:
- bool has_error_;
- int start_;
- int end_;
- // For function entries.
- int literals_;
- int properties_;
- LanguageMode mode_;
- // For error messages.
- const char* message_;
- const char* argument_opt_;
-};
-
-
FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
Scanner::Location function_name_location,
@@ -4021,14 +3182,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// one relative to the deserialized scope chain. Otherwise we must be
// compiling a function in an inner declaration scope in the eval, e.g. a
// nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
- function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ function_type == FunctionLiteral::DECLARATION &&
+ (!allow_harmony_scoping() || strict_mode() == SLOPPY) &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ : NewScope(scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -4041,23 +3203,23 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::IsGeneratorFlag generator = is_generator
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
+ DeferredFeedbackSlotProcessor* slot_processor;
AstProperties ast_properties;
BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
- { FunctionState function_state(this, scope);
- top_scope_->SetScopeName(function_name);
+ { FunctionState function_state(&function_state_, &scope_, scope, zone());
+ scope_->SetScopeName(function_name);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
// because it minimizes the work needed to suspend and resume an
// activation.
- top_scope_->ForceContextAllocation();
+ scope_->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
- // expressions. Presence of a variable for the generator object in the
- // FunctionState indicates that this function is a generator.
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(
+ // expressions. This also marks the FunctionState as a generator.
+ Variable* temp = scope_->DeclarationScope()->NewTemporary(
isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -4065,7 +3227,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner().location().beg_pos);
+ scope->set_start_position(scanner()->location().beg_pos);
// We don't yet know if the function will be strict, so we cannot yet
// produce errors for parameter names or duplicates. However, we remember
@@ -4082,21 +3244,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Store locations for possible future error reports.
if (!eval_args_error_log.IsValid() && IsEvalOrArguments(param_name)) {
- eval_args_error_log = scanner().location();
+ eval_args_error_log = scanner()->location();
}
if (!reserved_loc.IsValid() && is_strict_reserved) {
- reserved_loc = scanner().location();
+ reserved_loc = scanner()->location();
}
- if (!dupe_error_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+ if (!dupe_error_loc.IsValid() && scope_->IsDeclared(param_name)) {
duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
- dupe_error_loc = scanner().location();
+ dupe_error_loc = scanner()->location();
}
- top_scope_->DeclareParameter(param_name, VAR);
+ scope_->DeclareParameter(param_name, VAR);
num_parameters++;
if (num_parameters > Code::kMaxArguments) {
- ReportMessageAt(scanner().location(), "too_many_parameters",
- Vector<const char*>::empty());
+ ReportMessageAt(scanner()->location(), "too_many_parameters");
*ok = false;
return NULL;
}
@@ -4114,21 +3275,28 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies as is the case now.
Variable* fvar = NULL;
- Token::Value fvar_init_op = Token::INIT_CONST;
+ Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
- VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
- fvar = new(zone()) Variable(top_scope_,
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
+ fvar_init_op = Token::INIT_CONST;
+ }
+ VariableMode fvar_mode =
+ allow_harmony_scoping() && strict_mode() == STRICT ? CONST
+ : CONST_LEGACY;
+ fvar = new(zone()) Variable(scope_,
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
- top_scope_->DeclareFunctionVar(fvar_declaration);
+ proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
+ scope_->DeclareFunctionVar(fvar_declaration);
}
- // Determine whether the function will be lazily compiled.
- // The heuristics are:
+ // Determine if the function can be parsed lazily. Lazy parsing is different
+ // from lazy compilation; we need to parse more eagerly than we compile.
+
+ // We can only parse lazily if we also compile lazily. The heuristics for
+ // lazy compilation are:
// - It must not have been prohibited by the caller to Parse (some callers
// need a full AST).
// - The outer scope must allow lazy compilation of inner functions.
@@ -4138,26 +3306,45 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// compiled.
// These are all things we can know at this point, without looking at the
// function itself.
- bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
- top_scope_->AllowsLazyCompilation() &&
- !parenthesized_function_);
+
+ // In addition, we need to distinguish between these cases:
+ // (function foo() {
+ // bar = function() { return 1; }
+ // })();
+ // and
+ // (function foo() {
+ // var a = 1;
+ // bar = function() { return a; }
+ // })();
+
+ // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
+ // parenthesis before the function means that it will be called
+ // immediately). The inner function *must* be parsed eagerly to resolve the
+ // possible reference to the variable in foo's scope. However, it's possible
+ // that it will be compiled lazily.
+
+ // To make this additional case work, both Parser and PreParser implement a
+ // logic where only top-level functions will be parsed lazily.
+ bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
+ scope_->AllowsLazyCompilation() &&
+ !parenthesized_function_);
parenthesized_function_ = false; // The bit was set for this function only.
- if (is_lazily_compiled) {
+ if (is_lazily_parsed) {
int function_block_pos = position();
FunctionEntry entry;
- if (pre_parse_data_ != NULL) {
- // If we have pre_parse_data_, we use it to skip parsing the function
- // body. The preparser data contains the information we need to
- // construct the lazy function.
- entry = pre_parse_data()->GetFunctionEntry(function_block_pos);
+ if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+ // If we have cached data, we use it to skip parsing the function body.
+ // The data contains the information we need to construct the lazy
+ // function.
+ entry = (*cached_data())->GetFunctionEntry(function_block_pos);
if (entry.is_valid()) {
if (entry.end_pos() <= function_block_pos) {
// End position greater than end of stream is safe, and hard
// to check.
ReportInvalidPreparseData(function_name, CHECK_OK);
}
- scanner().SeekForward(entry.end_pos() - 1);
+ scanner()->SeekForward(entry.end_pos() - 1);
scope->set_end_position(entry.end_pos());
Expect(Token::RBRACE, CHECK_OK);
@@ -4165,14 +3352,23 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- top_scope_->SetLanguageMode(entry.language_mode());
+ scope_->SetStrictMode(entry.strict_mode());
} else {
- is_lazily_compiled = false;
+ // This case happens when we have preparse data but it doesn't contain
+ // an entry for the function. As a safety net, fall back to eager
+ // parsing. It is unclear whether PreParser's laziness analysis can
+ // produce different results than the Parser's laziness analysis (see
+ // https://codereview.chromium.org/7565003 ). In this case, we must
+ // discard all the preparse data, since the symbol data will be wrong.
+ is_lazily_parsed = false;
+ cached_data_mode_ = NO_CACHED_DATA;
}
} else {
- // With no preparser data, we partially parse the function, without
+ // With no cached data, we partially parse the function, without
// building an AST. This gathers the data needed to build a lazy
// function.
+ // FIXME(marja): Now the PreParser doesn't need to log functions /
+ // symbols; only errors -> clean that up.
SingletonLogger logger;
PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
if (result == PreParser::kPreParseStackOverflow) {
@@ -4187,8 +3383,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (arg != NULL) {
args = Vector<const char*>(&arg, 1);
}
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), args);
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(logger.start(), logger.end()),
+ logger.message(),
+ args);
*ok = false;
return NULL;
}
@@ -4198,15 +3396,26 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position() - function_block_pos);
materialized_literal_count = logger.literals();
expected_property_count = logger.properties();
- top_scope_->SetLanguageMode(logger.language_mode());
+ scope_->SetStrictMode(logger.strict_mode());
+ if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+ ASSERT(log_);
+ // Position right after terminal '}'.
+ int body_end = scanner()->location().end_pos;
+ log_->LogFunction(function_block_pos, body_end,
+ materialized_literal_count,
+ expected_property_count,
+ scope_->strict_mode());
+ }
}
}
- if (!is_lazily_compiled) {
+ if (!is_lazily_parsed) {
+ // Everything inside an eagerly parsed function will be parsed eagerly
+ // (see comment above).
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
body = new(zone()) ZoneList<Statement*>(8, zone());
if (fvar != NULL) {
- VariableProxy* fproxy = top_scope_->NewUnresolved(
+ VariableProxy* fproxy = scope_->NewUnresolved(
factory(), function_name, Interface::NewConst());
fproxy->BindTo(fvar);
body->Add(factory()->NewExpressionStatement(
@@ -4223,14 +3432,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
new(zone()) ZoneList<Expression*>(0, zone());
CallRuntime* allocation = factory()->NewCallRuntime(
isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
+ Runtime::FunctionForId(Runtime::kHiddenCreateJSGeneratorObject),
arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
body->Add(factory()->NewExpressionStatement(
@@ -4241,7 +3450,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_generator) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
- current_function_state_->generator_object_variable());
+ function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
@@ -4255,40 +3464,34 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
handler_count = function_state.handler_count();
Expect(Token::RBRACE, CHECK_OK);
- scope->set_end_position(scanner().location().end_pos);
+ scope->set_end_position(scanner()->location().end_pos);
}
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (!top_scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
if (IsEvalOrArguments(function_name)) {
- ReportMessageAt(function_name_location,
- "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return NULL;
}
if (name_is_strict_reserved) {
- ReportMessageAt(function_name_location, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
if (eval_args_error_log.IsValid()) {
- ReportMessageAt(eval_args_error_log, "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
*ok = false;
return NULL;
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return NULL;
}
if (reserved_loc.IsValid()) {
- ReportMessageAt(reserved_loc, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
*ok = false;
return NULL;
}
@@ -4297,10 +3500,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ slot_processor = factory()->visitor()->slot_processor();
dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
@@ -4320,6 +3524,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
pos);
function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_slot_processor(slot_processor);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
@@ -4330,7 +3535,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
- ASSERT_EQ(Token::LBRACE, scanner().current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
@@ -4345,7 +3550,7 @@ PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
allow_harmony_numeric_literals());
}
PreParser::PreParseResult result =
- reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
+ reusable_preparser_->PreParseLazyFunction(strict_mode(),
is_generator(),
logger);
return result;
@@ -4365,7 +3570,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->DeclarationScope()->ForceEagerCompilation();
+ scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name);
@@ -4397,7 +3602,8 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the function is defined if it's an inline runtime call.
if (function == NULL && name->Get(0) == '_') {
- ReportMessage("not_defined", Vector<Handle<String> >(&name, 1));
+ ParserTraits::ReportMessage("not_defined",
+ Vector<Handle<String> >(&name, 1));
*ok = false;
return NULL;
}
@@ -4407,199 +3613,12 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
-bool ParserBase::peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::YIELD;
-}
-
-
-bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
- if (peek() == Token::IDENTIFIER &&
- scanner()->is_next_contextual_keyword(keyword)) {
- Consume(Token::IDENTIFIER);
- return true;
- }
- return false;
-}
-
-
-void ParserBase::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- Token::Value tok = peek();
- if (tok == Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- return;
- }
- Expect(Token::SEMICOLON, ok);
-}
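The deleted ExpectSemicolon encodes the automatic-semicolon-insertion rules of ECMA-262 section 7.9: an explicit ';' is consumed, and a missing one is tolerated only before '}', at end of input, or when a line terminator precedes the next token. A standalone sketch of the same decision with invented token and lookahead types (not V8 code):

#include <cstdio>

enum class Tok { kSemicolon, kRBrace, kEos, kIdentifier };

struct Lookahead {
  Tok next;                   // the token that follows the statement
  bool newline_before_next;   // was there a line terminator before it?
};

// Returns true if the statement is properly terminated, either by an
// explicit ';' or by automatic semicolon insertion.
bool ExpectSemicolon(const Lookahead& la) {
  if (la.next == Tok::kSemicolon) return true;                      // explicit ';'
  if (la.newline_before_next) return true;                          // ASI: line break
  if (la.next == Tok::kRBrace || la.next == Tok::kEos) return true; // ASI: '}' or EOF
  return false;                                                     // syntax error
}

int main() {
  // "a = 1 \n b = 2" is fine; "a = 1 b = 2" on one line is not.
  std::printf("%d\n", ExpectSemicolon({Tok::kIdentifier, true}));   // 1
  std::printf("%d\n", ExpectSemicolon({Tok::kIdentifier, false}));  // 0
  return 0;
}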
-
-
-void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
- if (!scanner()->is_literal_contextual_keyword(keyword)) {
- ReportUnexpectedToken(scanner()->current_token());
- *ok = false;
- }
-}
-
-
-void ParserBase::ReportUnexpectedToken(Token::Value token) {
- // We don't report stack overflows here, to avoid increasing the
- // stack depth even further. Instead we report it after parsing is
- // over, in ParseProgram.
- if (token == Token::ILLEGAL && stack_overflow()) {
- return;
- }
- Scanner::Location source_location = scanner()->location();
-
- // Four of the tokens are treated specially
- switch (token) {
- case Token::EOS:
- return ReportMessageAt(source_location, "unexpected_eos");
- case Token::NUMBER:
- return ReportMessageAt(source_location, "unexpected_token_number");
- case Token::STRING:
- return ReportMessageAt(source_location, "unexpected_token_string");
- case Token::IDENTIFIER:
- return ReportMessageAt(source_location,
- "unexpected_token_identifier");
- case Token::FUTURE_RESERVED_WORD:
- return ReportMessageAt(source_location, "unexpected_reserved");
- case Token::YIELD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- return ReportMessageAt(source_location,
- is_classic_mode() ? "unexpected_token_identifier"
- : "unexpected_strict_reserved");
- default:
- const char* name = Token::String(token);
- ASSERT(name != NULL);
- ReportMessageAt(
- source_location, "unexpected_token", Vector<const char*>(&name, 1));
- }
-}
-
-
Literal* Parser::GetLiteralUndefined(int position) {
return factory()->NewLiteral(
isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole(int position) {
- return factory()->NewLiteral(
- isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope. If
-// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
-// "arguments" as identifier even in strict mode (this is needed in cases like
-// "var foo = eval;").
-Handle<String> Parser::ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- Handle<String> name = GetSymbol();
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- !top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
- ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
- *ok = false;
- }
- return name;
- } else if (top_scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator()))) {
- return GetSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
-}
-
-
-// Parses an identifier or a strict mode future reserved word, and indicates
-// whether it is strict mode future reserved.
-Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-Handle<String> Parser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Handle<String>();
- }
- return GetSymbol();
-}
-
-
-void Parser::MarkAsLValue(Expression* expression) {
- VariableProxy* proxy = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (proxy != NULL) proxy->MarkAsLValue();
-}
-
-
-// Checks LHS expression for assignment and prefix/postfix increment/decrement
-// in strict mode.
-void Parser::CheckStrictModeLValue(Expression* expression,
- bool* ok) {
- ASSERT(!top_scope_->is_classic_mode());
- VariableProxy* lhs = expression != NULL
- ? expression->AsVariableProxy()
- : NULL;
-
- if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
- ReportMessage("strict_eval_arguments", Vector<const char*>::empty());
- *ok = false;
- }
-}
-
-
-// Checks whether an octal literal was last seen between beg_pos and end_pos.
-// If so, reports an error. Only called for strict mode.
-void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner()->octal_position();
- if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal");
- scanner()->clear_octal_position();
- *ok = false;
- }
-}
-
-
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
@@ -4613,28 +3632,12 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Scanner::Location location = position == RelocInfo::kNoPosition
? Scanner::Location::invalid()
: Scanner::Location(position, position + 1);
- ReportMessageAt(location, "redeclaration", args);
+ ParserTraits::ReportMessageAt(location, "redeclaration", args);
*ok = false;
}
}
-// This function reads an identifier name and determines whether or not it
-// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifierName(ok);
- if (!*ok) return Handle<String>();
- if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
- const char* token = scanner().literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
// ----------------------------------------------------------------------------
// Parser support
@@ -4818,6 +3821,7 @@ bool RegExpParser::simple() {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message, NOT_TENURED);
+ ASSERT(!error_->is_null());
  // Zip to the end to make sure that no more input is read.
current_ = kEndMarker;
next_pos_ = in()->length();
@@ -5333,7 +4337,7 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
uc32 RegExpParser::ParseOctalLiteral() {
- ASSERT('0' <= current() && current() <= '7');
+ ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
uc32 value = current() - '0';
@@ -5677,13 +4681,14 @@ bool Parser::Parse() {
result = ParseProgram();
}
} else {
- ScriptDataImpl* pre_parse_data = info()->pre_parse_data();
- set_pre_parse_data(pre_parse_data);
- if (pre_parse_data != NULL && pre_parse_data->has_error()) {
- Scanner::Location loc = pre_parse_data->MessageLocation();
- const char* message = pre_parse_data->BuildMessage();
- Vector<const char*> args = pre_parse_data->BuildArgs();
- ReportMessageAt(loc, message, args);
+ SetCachedData(info()->cached_data(), info()->cached_data_mode());
+ if (info()->cached_data_mode() == CONSUME_CACHED_DATA &&
+ (*info()->cached_data())->has_error()) {
+ ScriptDataImpl* cached_data = *(info()->cached_data());
+ Scanner::Location loc = cached_data->MessageLocation();
+ const char* message = cached_data->BuildMessage();
+ Vector<const char*> args = cached_data->BuildArgs();
+ ParserTraits::ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 2b0995ace..f49626766 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -30,16 +30,18 @@
#include "allocation.h"
#include "ast.h"
+#include "compiler.h" // For CachedDataMode
#include "preparse-data-format.h"
#include "preparse-data.h"
#include "scopes.h"
#include "preparser.h"
namespace v8 {
+class ScriptCompiler;
+
namespace internal {
class CompilationInfo;
-class FuncNameInferrer;
class ParserLog;
class PositionStack;
class Target;
@@ -54,7 +56,7 @@ class FunctionEntry BASE_EMBEDDED {
kEndPositionIndex,
kLiteralCountIndex,
kPropertyCountIndex,
- kLanguageModeIndex,
+ kStrictModeIndex,
kSize
};
@@ -67,11 +69,10 @@ class FunctionEntry BASE_EMBEDDED {
int end_pos() { return backing_[kEndPositionIndex]; }
int literal_count() { return backing_[kLiteralCountIndex]; }
int property_count() { return backing_[kPropertyCountIndex]; }
- LanguageMode language_mode() {
- ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
- backing_[kLanguageModeIndex] == STRICT_MODE ||
- backing_[kLanguageModeIndex] == EXTENDED_MODE);
- return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+ StrictMode strict_mode() {
+ ASSERT(backing_[kStrictModeIndex] == SLOPPY ||
+ backing_[kStrictModeIndex] == STRICT);
+ return static_cast<StrictMode>(backing_[kStrictModeIndex]);
}
bool is_valid() { return !backing_.is_empty(); }
@@ -119,6 +120,7 @@ class ScriptDataImpl : public ScriptData {
unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
private:
+ friend class v8::ScriptCompiler;
Vector<unsigned> store_;
unsigned char* symbol_data_;
unsigned char* symbol_data_end_;
@@ -404,10 +406,198 @@ class RegExpParser BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
-// Forward declaration.
+class Parser;
class SingletonLogger;
-class Parser : public ParserBase {
+class ParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef v8::internal::Parser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef v8::internal::Scope Scope;
+ typedef Variable GeneratorVariable;
+ typedef v8::internal::Zone Zone;
+
+ // Return types for traversing functions.
+ typedef Handle<String> Identifier;
+ typedef v8::internal::Expression* Expression;
+ typedef Yield* YieldExpression;
+ typedef v8::internal::FunctionLiteral* FunctionLiteral;
+ typedef v8::internal::Literal* Literal;
+ typedef ObjectLiteral::Property* ObjectLiteralProperty;
+ typedef ZoneList<v8::internal::Expression*>* ExpressionList;
+ typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef AstNodeFactory<AstConstructionVisitor> Factory;
+ };
+
+ explicit ParserTraits(Parser* parser) : parser_(parser) {}
+
+ // Custom operations executed when FunctionStates are created and destructed.
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
+ Isolate* isolate = zone->isolate();
+ function_state->isolate_ = isolate;
+ function_state->saved_ast_node_id_ = isolate->ast_node_id();
+ isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+ }
+
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {
+ if (function_state->outer_function_state_ != NULL) {
+ function_state->isolate_->set_ast_node_id(
+ function_state->saved_ast_node_id_);
+ }
+ }
+
+ // Helper functions for recursive descent.
+ bool IsEvalOrArguments(Handle<String> identifier) const;
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(Expression* expression);
+
+ static bool IsIdentifier(Expression* expression);
+
+ static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
+ return ObjectLiteral::IsBoilerplateProperty(property);
+ }
+
+ static bool IsArrayIndex(Handle<String> string, uint32_t* index) {
+ return !string.is_null() && string->AsArrayIndex(index);
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, Handle<String> id) {
+ fni->PushLiteralName(id);
+ }
+ void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ Scope* scope, Expression* value, bool* has_function) {
+ if (scope->DeclarationScope()->is_global_scope() &&
+ value->AsFunctionLiteral() != NULL) {
+ *has_function = true;
+ value->AsFunctionLiteral()->set_pretenure();
+ }
+ }
+
+ // If we assign a function literal to a property we pretenure the
+ // literal so it can be added as a constant function property.
+ static void CheckAssigningFunctionLiteralToProperty(Expression* left,
+ Expression* right);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. This checks if expression is an eval call, and if yes,
+ // forwards the information to scope.
+ void CheckPossibleEvalCall(Expression* expression, Scope* scope);
+
+ // Determine if the expression is a variable proxy and mark it as being used
+ // in an assignment or with an increment/decrement operator. This is currently
+ // used for statically checking assignments to harmony const bindings.
+ static Expression* MarkExpressionAsLValue(Expression* expression);
+
+ // Checks LHS expression for assignment and prefix/postfix increment/decrement
+ // in strict mode.
+ void CheckStrictModeLValue(Expression* expression, bool* ok);
+
+ // Returns true if we have a binary expression between two numeric
+ // literals. In that case, *x will be changed to an expression which is the
+ // computed value.
+ bool ShortcutNumericLiteralBinaryExpression(
+ Expression** x, Expression* y, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Rewrites the following types of unary expressions:
+ // not <literal> -> true / false
+ // + <numeric literal> -> <numeric literal>
+ // - <numeric literal> -> <numeric literal with value negated>
+ // ! <literal> -> true / false
+ // The following rewriting rules enable the collection of type feedback
+ // without any special stub and the multiplication is removed later in
+ // Crankshaft's canonicalization pass.
+ // + foo -> foo * 1
+ // - foo -> foo * (-1)
+ // ~ foo -> foo ^(~0)
+ Expression* BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error = false);
+ void ReportMessage(const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error = false);
+ void ReportMessageAt(Scanner::Location source_location,
+ const char* message,
+ Vector<Handle<String> > args,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static Handle<String> EmptyIdentifier() {
+ return Handle<String>();
+ }
+ static Expression* EmptyExpression() {
+ return NULL;
+ }
+ static Literal* EmptyLiteral() {
+ return NULL;
+ }
+ // Used in error return values.
+ static ZoneList<Expression*>* NullExpressionList() {
+ return NULL;
+ }
+
+ // Odd-ball literal creators.
+ Literal* GetLiteralTheHole(int position,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+
+ // Producing data during the recursive descent.
+ Handle<String> GetSymbol(Scanner* scanner = NULL);
+ Handle<String> NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured);
+ Expression* ThisExpression(Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Literal* ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromIdentifier(
+ Handle<String> name, int pos, Scope* scope,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ Expression* ExpressionFromString(
+ int pos, Scanner* scanner,
+ AstNodeFactory<AstConstructionVisitor>* factory);
+ ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
+ return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
+ }
+ ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size, Zone* zone) {
+ return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ Expression* ParseV8Intrinsic(bool* ok);
+ FunctionLiteral* ParseFunctionLiteral(
+ Handle<String> name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok);
+
+ private:
+ Parser* parser_;
+};
+
+
+class Parser : public ParserBase<ParserTraits> {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -427,12 +617,16 @@ class Parser : public ParserBase {
bool Parse();
private:
- static const int kMaxNumFunctionLocals = 131071; // 2^17-1
+ friend class ParserTraits;
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
+ // Limit the allowed number of local variables in a function. The hard limit
+ // is that offsets computed by FullCodeGenerator::StackOperand and similar
+ // functions are ints, and they should not overflow. In addition, accessing
+ // local variables creates user-controlled constants in the generated code,
+ // and we don't want too much user-controlled memory inside the code (this was
+ // the reason why this limit was introduced in the first place; see
+ // https://codereview.chromium.org/7003030/ ).
+ static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
enum VariableDeclarationContext {
kModuleElement,
@@ -447,84 +641,6 @@ class Parser : public ParserBase {
kHasNoInitializers
};
- class BlockState;
-
- class FunctionState BASE_EMBEDDED {
- public:
- FunctionState(Parser* parser, Scope* scope);
- ~FunctionState();
-
- int NextMaterializedLiteralIndex() {
- return next_materialized_literal_index_++;
- }
- int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
- }
-
- int NextHandlerIndex() { return next_handler_index_++; }
- int handler_count() { return next_handler_index_; }
-
- void AddProperty() { expected_property_count_++; }
- int expected_property_count() { return expected_property_count_; }
-
- void set_generator_object_variable(Variable *variable) {
- ASSERT(variable != NULL);
- ASSERT(!is_generator());
- generator_object_variable_ = variable;
- }
- Variable* generator_object_variable() const {
- return generator_object_variable_;
- }
- bool is_generator() const {
- return generator_object_variable_ != NULL;
- }
-
- AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
- private:
- // Used to assign an index to each literal that needs materialization in
- // the function. Includes regexp literals, and boilerplate for object and
- // array literals.
- int next_materialized_literal_index_;
-
- // Used to assign a per-function index to try and catch handlers.
- int next_handler_index_;
-
- // Properties count estimation.
- int expected_property_count_;
-
- // For generators, the variable that holds the generator object. This
- // variable is used by yield expressions and return statements. NULL
- // indicates that this function is not a generator.
- Variable* generator_object_variable_;
-
- Parser* parser_;
- FunctionState* outer_function_state_;
- Scope* outer_scope_;
- int saved_ast_node_id_;
- AstNodeFactory<AstConstructionVisitor> factory_;
- };
-
- class ParsingModeScope BASE_EMBEDDED {
- public:
- ParsingModeScope(Parser* parser, Mode mode)
- : parser_(parser),
- old_mode_(parser->mode()) {
- parser_->mode_ = mode;
- }
- ~ParsingModeScope() {
- parser_->mode_ = old_mode_;
- }
-
- private:
- Parser* parser_;
- Mode old_mode_;
- };
-
- virtual bool is_classic_mode() {
- return top_scope_->is_classic_mode();
- }
-
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram();
@@ -532,7 +648,6 @@ class Parser : public ParserBase {
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
// Called by ParseProgram after setting up the scanner.
@@ -541,39 +656,27 @@ class Parser : public ParserBase {
// Report syntax error
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
- void ReportMessage(const char* message, Vector<const char*> args);
- void ReportMessage(const char* message, Vector<Handle<String> > args);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
- }
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
- void set_pre_parse_data(ScriptDataImpl *data) {
- pre_parse_data_ = data;
- symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
+ void SetCachedData(ScriptDataImpl** data,
+ CachedDataMode cached_data_mode) {
+ cached_data_mode_ = cached_data_mode;
+ if (cached_data_mode == NO_CACHED_DATA) {
+ cached_data_ = NULL;
+ } else {
+ ASSERT(data != NULL);
+ cached_data_ = data;
+ symbol_cache_.Initialize(*data ? (*data)->symbol_count() : 0, zone());
+ }
}
- bool inside_with() const { return top_scope_->inside_with(); }
- Scanner& scanner() { return scanner_; }
- Mode mode() const { return mode_; }
- ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
- bool is_extended_mode() {
- ASSERT(top_scope_ != NULL);
- return top_scope_->is_extended_mode();
- }
+ bool inside_with() const { return scope_->inside_with(); }
+ ScriptDataImpl** cached_data() const { return cached_data_; }
+ CachedDataMode cached_data_mode() const { return cached_data_mode_; }
Scope* DeclarationScope(VariableMode mode) {
return IsLexicalVariableMode(mode)
- ? top_scope_ : top_scope_->DeclarationScope();
+ ? scope_ : scope_->DeclarationScope();
}
- // Check if the given string is 'eval' or 'arguments'.
- bool IsEvalOrArguments(Handle<String> string);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -623,31 +726,12 @@ class Parser : public ParserBase {
  // Support for harmony block scoped bindings.
Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
- Expression* ParseExpression(bool accept_IN, bool* ok);
- Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression* ParseYieldExpression(bool* ok);
- Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression* ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression* ParseUnaryExpression(bool* ok);
- Expression* ParsePostfixExpression(bool* ok);
- Expression* ParseLeftHandSideExpression(bool* ok);
- Expression* ParseNewExpression(bool* ok);
- Expression* ParseMemberExpression(bool* ok);
- Expression* ParseNewPrefix(PositionStack* stack, bool* ok);
- Expression* ParseMemberWithNewPrefixesExpression(PositionStack* stack,
- bool* ok);
- Expression* ParsePrimaryExpression(bool* ok);
- Expression* ParseArrayLiteral(bool* ok);
- Expression* ParseObjectLiteral(bool* ok);
- Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
-
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
Expression* subject,
Statement* body);
- ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(
Handle<String> name,
Scanner::Location function_name_location,
@@ -660,52 +744,10 @@ class Parser : public ParserBase {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- bool is_generator() const { return current_function_state_->is_generator(); }
-
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
- Handle<String> LiteralString(PretenureFlag tenured) {
- if (scanner().is_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> NextLiteralString(PretenureFlag tenured) {
- if (scanner().is_next_literal_ascii()) {
- return isolate_->factory()->NewStringFromAscii(
- scanner().next_literal_ascii_string(), tenured);
- } else {
- return isolate_->factory()->NewStringFromTwoByte(
- scanner().next_literal_utf16_string(), tenured);
- }
- }
-
- Handle<String> GetSymbol();
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
- Literal* GetLiteralTheHole(int position);
-
- Handle<String> ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
- Handle<String> ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok);
- Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
- // Determine if the expression is a variable proxy and mark it as being used
- // in an assignment or with a increment/decrement operator. This is currently
- // used on for the statically checking assignments to harmony const bindings.
- void MarkAsLValue(Expression* expression);
-
- // Strict mode validation of LValue expressions
- void CheckStrictModeLValue(Expression* expression,
- bool* ok);
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
@@ -734,8 +776,6 @@ class Parser : public ParserBase {
Scope* NewScope(Scope* parent, ScopeType type);
- Handle<String> LookupSymbol(int symbol_id);
-
Handle<String> LookupCachedSymbol(int symbol_id);
// Generate AST node that throw a ReferenceError with the given type.
@@ -760,35 +800,18 @@ class Parser : public ParserBase {
PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
- AstNodeFactory<AstConstructionVisitor>* factory() {
- return current_function_state_->factory();
- }
-
Isolate* isolate_;
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
Scanner scanner_;
PreParser* reusable_preparser_;
- Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
- FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
- v8::Extension* extension_;
- ScriptDataImpl* pre_parse_data_;
- FuncNameInferrer* fni_;
-
- Mode mode_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
+ ScriptDataImpl** cached_data_;
+ CachedDataMode cached_data_mode_;
- Zone* zone_;
CompilationInfo* info_;
- friend class BlockState;
- friend class FunctionState;
};
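
The ParserTraits class added above is the glue that lets Parser and PreParser share one recursive-descent implementation: ParserBase<ParserTraits> is written against the typedefs in Traits::Type and the helper methods the traits provide, so the shared code never needs to know whether it is building a real AST or the preparser's dummy values. A minimal, self-contained sketch of the pattern, using hypothetical names rather than V8's actual ParserBase:

    // Illustrative only; ExampleTraits and ParserBaseSketch are stand-ins.
    struct ExampleTraits {
      struct Type {
        typedef int Expression;  // The real parser plugs in Expression*.
      };
      static Type::Expression EmptyExpression() { return 0; }
    };

    template <typename Traits>
    class ParserBaseSketch {
     public:
      typedef typename Traits::Type::Expression ExpressionT;

      // Shared parsing code fails uniformly: it clears *ok and returns the
      // traits-provided "empty" value, whatever that means for the concrete
      // parser (NULL expression, empty handle, dummy enum, ...).
      ExpressionT FailAndReturnEmpty(bool* ok) {
        *ok = false;
        return Traits::EmptyExpression();
      }
    };

The real ParserTraits additionally forwards error reporting, literal creation, and scope bookkeeping to the Parser instance it wraps (the parser_ member), which is why it is constructed with a Parser*.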
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index ac804398f..4ae9bec9e 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -51,7 +51,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -60,7 +60,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
ASSERT(utc != -1);
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 9ab6583e0..7d15cef6b 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -61,7 +61,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -70,7 +70,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index fbcad8f6d..527b9f616 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -53,7 +53,8 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -117,7 +118,7 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -126,7 +127,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 683a04d38..25ba0da08 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -182,7 +182,7 @@ void OS::SignalCodeMovingGC() {
}
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -191,7 +191,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index c881d4735..a5d477d61 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -59,7 +59,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -68,7 +68,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 402d41132..5ca12522c 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -265,10 +265,10 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- // Redirect to std abort to signal abnormal program termination.
- if (FLAG_break_on_abort) {
- DebugBreak();
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Redirect to std abort to signal abnormal program termination.
abort();
}
@@ -276,6 +276,8 @@ void OS::Abort() {
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+ asm("brk 0");
#elif V8_HOST_ARCH_MIPS
asm("break");
#elif V8_HOST_ARCH_IA32
@@ -352,7 +354,25 @@ double OS::TimeCurrentMillis() {
}
-double OS::DaylightSavingsOffset(double time) {
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ ASSERT(cache == NULL);
+}
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
if (std::isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -560,6 +580,8 @@ class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
pthread_t thread_; // Thread handle for pthread.
+ // Synchronizes thread creation
+ Mutex thread_creation_mutex_;
};
Thread::Thread(const Options& options)
@@ -607,10 +629,10 @@ static void SetThreadName(const char* name) {
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the first argument to pthread_create() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- thread->data()->thread_ = pthread_self();
+ // We take the lock here to make sure that pthread_create finished first since
+ // we don't know which thread will run first (the original thread or the new
+ // one).
+ { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
thread->NotifyStartedAndRun();
@@ -637,7 +659,10 @@ void Thread::Start() {
ASSERT_EQ(0, result);
}
#endif
- result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ {
+ LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+ result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+ }
ASSERT_EQ(0, result);
result = pthread_attr_destroy(&attr);
ASSERT_EQ(0, result);
diff --git a/deps/v8/src/platform-qnx.cc b/deps/v8/src/platform-qnx.cc
index cd031e795..ef0998f89 100644
--- a/deps/v8/src/platform-qnx.cc
+++ b/deps/v8/src/platform-qnx.cc
@@ -110,7 +110,7 @@ bool OS::ArmUsingHardFloat() {
#endif // __arm__
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -119,7 +119,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 4d910d47a..f23ae0838 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -80,7 +80,7 @@ namespace v8 {
namespace internal {
-const char* OS::LocalTimezone(double time) {
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm* t = localtime(&tv);
@@ -89,7 +89,7 @@ const char* OS::LocalTimezone(double time) {
}
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
tzset();
return -static_cast<double>(timezone * msPerSecond);
}
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 56261735b..fe84bcd3f 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -218,6 +218,97 @@ void MathSetup() {
}
+class TimezoneCache {
+ public:
+ TimezoneCache() : initialized_(false) { }
+
+ void Clear() {
+ initialized_ = false;
+ }
+
+ // Initialize timezone information. The timezone information is obtained from
+ // Windows. If we cannot get the timezone information, we fall back to CET.
+ void InitializeIfNeeded() {
+ // Just return if timezone information has already been initialized.
+ if (initialized_) return;
+
+ // Initialize POSIX time zone data.
+ _tzset();
+ // Obtain timezone information from operating system.
+ memset(&tzinfo_, 0, sizeof(tzinfo_));
+ if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+ // If we cannot get timezone information we fall back to CET.
+ tzinfo_.Bias = -60;
+ tzinfo_.StandardDate.wMonth = 10;
+ tzinfo_.StandardDate.wDay = 5;
+ tzinfo_.StandardDate.wHour = 3;
+ tzinfo_.StandardBias = 0;
+ tzinfo_.DaylightDate.wMonth = 3;
+ tzinfo_.DaylightDate.wDay = 5;
+ tzinfo_.DaylightDate.wHour = 2;
+ tzinfo_.DaylightBias = -60;
+ }
+
+ // Make standard and DST timezone names.
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+ std_tz_name_, kTzNameSize, NULL, NULL);
+ std_tz_name_[kTzNameSize - 1] = '\0';
+ WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+ dst_tz_name_, kTzNameSize, NULL, NULL);
+ dst_tz_name_[kTzNameSize - 1] = '\0';
+
+ // If OS returned empty string or resource id (like "@tzres.dll,-211")
+ // simply guess the name from the UTC bias of the timezone.
+ // To properly resolve the resource identifier requires a library load,
+ // which is not possible in a sandbox.
+ if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+ OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
+ "%s Standard Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+ OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
+ "%s Daylight Time",
+ GuessTimezoneNameFromBias(tzinfo_.Bias));
+ }
+ // Timezone information initialized.
+ initialized_ = true;
+ }
+
+ // Guess the name of the timezone from the bias.
+ // The guess is very biased towards the northern hemisphere.
+ const char* GuessTimezoneNameFromBias(int bias) {
+ static const int kHour = 60;
+ switch (-bias) {
+ case -9*kHour: return "Alaska";
+ case -8*kHour: return "Pacific";
+ case -7*kHour: return "Mountain";
+ case -6*kHour: return "Central";
+ case -5*kHour: return "Eastern";
+ case -4*kHour: return "Atlantic";
+ case 0*kHour: return "GMT";
+ case +1*kHour: return "Central Europe";
+ case +2*kHour: return "Eastern Europe";
+ case +3*kHour: return "Russia";
+ case +5*kHour + 30: return "India";
+ case +8*kHour: return "China";
+ case +9*kHour: return "Japan";
+ case +12*kHour: return "New Zealand";
+ default: return "Local";
+ }
+ }
+
+
+ private:
+ static const int kTzNameSize = 128;
+ bool initialized_;
+ char std_tz_name_[kTzNameSize];
+ char dst_tz_name_[kTzNameSize];
+ TIME_ZONE_INFORMATION tzinfo_;
+ friend class Win32Time;
+};
+
+
// ----------------------------------------------------------------------------
// The Time class represents time on win32. A timestamp is represented as
// a 64-bit count of 100-nanosecond intervals since January 1, 1601 (UTC). JavaScript
@@ -242,14 +333,14 @@ class Win32Time {
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
// routine also takes into account whether daylight saving is in effect
// at the time.
- int64_t LocalOffset();
+ int64_t LocalOffset(TimezoneCache* cache);
// Returns the daylight savings time offset for the time in milliseconds.
- int64_t DaylightSavingsOffset();
+ int64_t DaylightSavingsOffset(TimezoneCache* cache);
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
- char* LocalTimezone();
+ char* LocalTimezone(TimezoneCache* cache);
private:
// Constants for time conversion.
@@ -258,25 +349,10 @@ class Win32Time {
static const int64_t kMsPerMinute = 60000;
// Constants for timezone information.
- static const int kTzNameSize = 128;
static const bool kShortTzNames = false;
- // Timezone information. We need to have static buffers for the
- // timezone names because we return pointers to these in
- // LocalTimezone().
- static bool tz_initialized_;
- static TIME_ZONE_INFORMATION tzinfo_;
- static char std_tz_name_[kTzNameSize];
- static char dst_tz_name_[kTzNameSize];
-
- // Initialize the timezone information (if not already done).
- static void TzSet();
-
- // Guess the name of the timezone from the bias.
- static const char* GuessTimezoneNameFromBias(int bias);
-
// Return whether or not daylight savings time is in effect at this time.
- bool InDST();
+ bool InDST(TimezoneCache* cache);
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -298,13 +374,6 @@ class Win32Time {
};
-// Static variables.
-bool Win32Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Win32Time::tzinfo_;
-char Win32Time::std_tz_name_[kTzNameSize];
-char Win32Time::dst_tz_name_[kTzNameSize];
-
-
// Initialize timestamp to start of epoch.
Win32Time::Win32Time() {
t() = 0;
@@ -393,90 +462,13 @@ void Win32Time::SetToCurrentTime() {
}
-// Guess the name of the timezone from the bias.
-// The guess is very biased towards the northern hemisphere.
-const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
- static const int kHour = 60;
- switch (-bias) {
- case -9*kHour: return "Alaska";
- case -8*kHour: return "Pacific";
- case -7*kHour: return "Mountain";
- case -6*kHour: return "Central";
- case -5*kHour: return "Eastern";
- case -4*kHour: return "Atlantic";
- case 0*kHour: return "GMT";
- case +1*kHour: return "Central Europe";
- case +2*kHour: return "Eastern Europe";
- case +3*kHour: return "Russia";
- case +5*kHour + 30: return "India";
- case +8*kHour: return "China";
- case +9*kHour: return "Japan";
- case +12*kHour: return "New Zealand";
- default: return "Local";
- }
-}
-
-
-// Initialize timezone information. The timezone information is obtained from
-// windows. If we cannot get the timezone information we fall back to CET.
-// Please notice that this code is not thread-safe.
-void Win32Time::TzSet() {
- // Just return if timezone information has already been initialized.
- if (tz_initialized_) return;
-
- // Initialize POSIX time zone data.
- _tzset();
- // Obtain timezone information from operating system.
- memset(&tzinfo_, 0, sizeof(tzinfo_));
- if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
- // If we cannot get timezone information we fall back to CET.
- tzinfo_.Bias = -60;
- tzinfo_.StandardDate.wMonth = 10;
- tzinfo_.StandardDate.wDay = 5;
- tzinfo_.StandardDate.wHour = 3;
- tzinfo_.StandardBias = 0;
- tzinfo_.DaylightDate.wMonth = 3;
- tzinfo_.DaylightDate.wDay = 5;
- tzinfo_.DaylightDate.wHour = 2;
- tzinfo_.DaylightBias = -60;
- }
-
- // Make standard and DST timezone names.
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
- std_tz_name_, kTzNameSize, NULL, NULL);
- std_tz_name_[kTzNameSize - 1] = '\0';
- WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
- dst_tz_name_, kTzNameSize, NULL, NULL);
- dst_tz_name_[kTzNameSize - 1] = '\0';
-
- // If OS returned empty string or resource id (like "@tzres.dll,-211")
- // simply guess the name from the UTC bias of the timezone.
- // To properly resolve the resource identifier requires a library load,
- // which is not possible in a sandbox.
- if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
- "%s Standard Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
- if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
- OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
- "%s Daylight Time",
- GuessTimezoneNameFromBias(tzinfo_.Bias));
- }
-
- // Timezone information initialized.
- tz_initialized_ = true;
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Win32Time::LocalOffset() {
- // Initialize timezone information, if needed.
- TzSet();
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
@@ -499,29 +491,30 @@ int64_t Win32Time::LocalOffset() {
if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
if (posix_local_time_struct.tm_isdst > 0) {
- return (tzinfo_.Bias + tzinfo_.DaylightBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
} else if (posix_local_time_struct.tm_isdst == 0) {
- return (tzinfo_.Bias + tzinfo_.StandardBias) * -kMsPerMinute;
+ return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
} else {
- return tzinfo_.Bias * -kMsPerMinute;
+ return cache->tzinfo_.Bias * -kMsPerMinute;
}
}
// Return whether or not daylight savings time is in effect at this time.
-bool Win32Time::InDST() {
- // Initialize timezone information, if needed.
- TzSet();
+bool Win32Time::InDST(TimezoneCache* cache) {
+ cache->InitializeIfNeeded();
// Determine if DST is in effect at the specified time.
bool in_dst = false;
- if (tzinfo_.StandardDate.wMonth != 0 || tzinfo_.DaylightDate.wMonth != 0) {
+ if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+ cache->tzinfo_.DaylightDate.wMonth != 0) {
// Get the local timezone offset for the timestamp in milliseconds.
- int64_t offset = LocalOffset();
+ int64_t offset = LocalOffset(cache);
// Compute the offset for DST. The bias parameters in the timezone info
// are specified in minutes. These must be converted to milliseconds.
- int64_t dstofs = -(tzinfo_.Bias + tzinfo_.DaylightBias) * kMsPerMinute;
+ int64_t dstofs =
+ -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
// If the local time offset equals the timezone bias plus the daylight
// bias then DST is in effect.
@@ -533,17 +526,17 @@ bool Win32Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Win32Time::DaylightSavingsOffset() {
- return InDST() ? 60 * kMsPerMinute : 0;
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+ return InDST(cache) ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Win32Time::LocalTimezone() {
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
- return InDST() ? dst_tz_name_ : std_tz_name_;
+ return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
}
@@ -586,27 +579,43 @@ double OS::TimeCurrentMillis() {
}
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+ delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+ cache->Clear();
+}
+
+
// Returns a string identifying the current timezone taking into
// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- return Win32Time(time).LocalTimezone();
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+ return Win32Time(time).LocalTimezone(cache);
}
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
+double OS::LocalTimeOffset(TimezoneCache* cache) {
// Use current time, rounded to the millisecond.
Win32Time t(TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
- return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
+ return static_cast<double>(t.LocalOffset(cache) -
+ t.DaylightSavingsOffset(cache));
}
// Returns the daylight savings offset in milliseconds for the given
// time.
-double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Win32Time(time).DaylightSavingsOffset();
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+ int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
return static_cast<double>(offset);
}
@@ -662,15 +671,15 @@ static bool HasConsole() {
static void VPrintHelper(FILE* stream, const char* format, va_list args) {
- if (HasConsole()) {
- vfprintf(stream, format, args);
- } else {
+ if ((stream == stdout || stream == stderr) && !HasConsole()) {
// It is important to use safe print here in order to avoid
// overflowing the buffer. We might truncate the output, but this
// does not crash.
EmbeddedVector<char, 4096> buffer;
OS::VSNPrintF(buffer, format, args);
OutputDebugStringA(buffer.start());
+ } else {
+ vfprintf(stream, format, args);
}
}
@@ -923,12 +932,11 @@ void OS::Sleep(int milliseconds) {
void OS::Abort() {
- if (IsDebuggerPresent() || FLAG_break_on_abort) {
- DebugBreak();
- } else {
- // Make the MSVCRT do a silent abort.
- raise(SIGABRT);
+ if (FLAG_hard_abort) {
+ V8_IMMEDIATE_CRASH();
}
+ // Make the MSVCRT do a silent abort.
+ raise(SIGABRT);
}
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 8af90f1cb..d087d2397 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -159,6 +159,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
+class TimezoneCache;
+
+
// ----------------------------------------------------------------------------
// OS
//
@@ -182,16 +185,20 @@ class OS {
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
+ static TimezoneCache* CreateTimezoneCache();
+ static void DisposeTimezoneCache(TimezoneCache* cache);
+ static void ClearTimezoneCache(TimezoneCache* cache);
+
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
- static const char* LocalTimezone(double time);
+ static const char* LocalTimezone(double time, TimezoneCache* cache);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
- static double LocalTimeOffset();
+ static double LocalTimeOffset(TimezoneCache* cache);
// Returns the daylight savings offset for the given time.
- static double DaylightSavingsOffset(double time);
+ static double DaylightSavingsOffset(double time, TimezoneCache* cache);
// Returns last OS error.
static int GetLastError();
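
The timezone functions above now take an explicit TimezoneCache instead of consulting process-wide statics, with Create/Dispose/Clear managing the cache's lifetime. Assuming a caller inside namespace v8::internal that includes platform.h, the calling convention is roughly this sketch (the real consumers are more involved; error handling is omitted):

    // Sketch only; variable names are illustrative.
    static void TimezoneQuerySketch() {
      TimezoneCache* cache = OS::CreateTimezoneCache();
      double now_ms = OS::TimeCurrentMillis();
      double local_offset_ms = OS::LocalTimeOffset(cache);
      double dst_offset_ms = OS::DaylightSavingsOffset(now_ms, cache);
      const char* tz_name = OS::LocalTimezone(now_ms, cache);
      // (The values would feed date computations; they are ignored here.)
      // If the embedder detects a system timezone change, drop cached data:
      OS::ClearTimezoneCache(cache);
      OS::DisposeTimezoneCache(cache);
    }

Note that the POSIX implementation above makes the cache a no-op (CreateTimezoneCache returns NULL), while the Win32 implementation moves the previously static tzinfo_ and timezone-name buffers into the cache object.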
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index e64326e57..e2cf0a1a3 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -37,7 +37,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 7;
+ static const unsigned kCurrentVersion = 8;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index 8e0884828..9f585a991 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -37,13 +37,40 @@
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder
-FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
+template <typename Char>
+static int vector_hash(Vector<const Char> string) {
+ int hash = 0;
+ for (int i = 0; i < string.length(); i++) {
+ int c = static_cast<int>(string[i]);
+ hash += c;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
+
+
+static bool vector_compare(void* a, void* b) {
+ CompleteParserRecorder::Key* string1 =
+ reinterpret_cast<CompleteParserRecorder::Key*>(a);
+ CompleteParserRecorder::Key* string2 =
+ reinterpret_cast<CompleteParserRecorder::Key*>(b);
+ if (string1->is_one_byte != string2->is_one_byte) return false;
+ int length = string1->literal_bytes.length();
+ if (string2->literal_bytes.length() != length) return false;
+ return memcmp(string1->literal_bytes.start(),
+ string2->literal_bytes.start(), length) == 0;
+}
+
+
+CompleteParserRecorder::CompleteParserRecorder()
: function_store_(0),
- is_recording_(true),
- pause_count_(0) {
+ literal_chars_(0),
+ symbol_store_(0),
+ symbol_keys_(0),
+ string_table_(vector_compare),
+ symbol_id_(0) {
preamble_[PreparseDataConstants::kMagicOffset] =
PreparseDataConstants::kMagicNumber;
preamble_[PreparseDataConstants::kVersionOffset] =
@@ -56,10 +83,11 @@ FunctionLoggingParserRecorder::FunctionLoggingParserRecorder()
#ifdef DEBUG
prev_start_ = -1;
#endif
+ should_log_symbols_ = true;
}
-void FunctionLoggingParserRecorder::LogMessage(int start_pos,
+void CompleteParserRecorder::LogMessage(int start_pos,
int end_pos,
const char* message,
const char* arg_opt) {
@@ -75,11 +103,11 @@ void FunctionLoggingParserRecorder::LogMessage(int start_pos,
STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
WriteString(CStrVector(message));
if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
- is_recording_ = false;
+ should_log_symbols_ = false;
}
-void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
+void CompleteParserRecorder::WriteString(Vector<const char> str) {
function_store_.Add(str.length());
for (int i = 0; i < str.length(); i++) {
function_store_.Add(str[i]);
@@ -87,43 +115,27 @@ void FunctionLoggingParserRecorder::WriteString(Vector<const char> str) {
}
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record both function entries and symbols.
-
-Vector<unsigned> PartialParserRecorder::ExtractData() {
- int function_size = function_store_.size();
- int total_size = PreparseDataConstants::kHeaderSize + function_size;
- Vector<unsigned> data = Vector<unsigned>::New(total_size);
- preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
- preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
- int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
- if (function_size > 0) {
- function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
- symbol_start));
- }
- return data;
+void CompleteParserRecorder::LogOneByteSymbol(int start,
+ Vector<const uint8_t> literal) {
+ ASSERT(should_log_symbols_);
+ int hash = vector_hash(literal);
+ LogSymbol(start, hash, true, literal);
}
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-CompleteParserRecorder::CompleteParserRecorder()
- : FunctionLoggingParserRecorder(),
- literal_chars_(0),
- symbol_store_(0),
- symbol_keys_(0),
- string_table_(vector_compare),
- symbol_id_(0) {
+void CompleteParserRecorder::LogTwoByteSymbol(int start,
+ Vector<const uint16_t> literal) {
+ ASSERT(should_log_symbols_);
+ int hash = vector_hash(literal);
+ LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
}
void CompleteParserRecorder::LogSymbol(int start,
int hash,
- bool is_ascii,
+ bool is_one_byte,
Vector<const byte> literal_bytes) {
- Key key = { is_ascii, literal_bytes };
+ Key key = { is_one_byte, literal_bytes };
HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
if (id == 0) {
@@ -167,16 +179,26 @@ Vector<unsigned> CompleteParserRecorder::ExtractData() {
void CompleteParserRecorder::WriteNumber(int number) {
+ // Split the number into chunks of 7 bits. Write them one after another (the
+ // most significant first). Use the MSB of each byte for signalling that the
+ // number continues. See ScriptDataImpl::ReadNumber for the reading side.
ASSERT(number >= 0);
int mask = (1 << 28) - 1;
- for (int i = 28; i > 0; i -= 7) {
- if (number > mask) {
- symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
- number &= mask;
- }
+ int i = 28;
+ // 26 million symbols ought to be enough for anybody.
+ ASSERT(number <= mask);
+ while (number < mask) {
+ mask >>= 7;
+ i -= 7;
+ }
+ while (i > 0) {
+ symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
+ number &= mask;
mask >>= 7;
+ i -= 7;
}
+ ASSERT(number < (1 << 7));
symbol_store_.Add(static_cast<byte>(number));
}
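
The rewritten WriteNumber above emits a most-significant-chunk-first encoding: 7 payload bits per byte, with the high bit set on every byte except the last (the comment points at ScriptDataImpl::ReadNumber as the consumer). A self-contained sketch of a matching decoder, not V8's actual ReadNumber:

    // Decodes the chunked format produced by WriteNumber above; hypothetical
    // helper for illustration. Returns the value and reports bytes consumed.
    static int DecodeChunkedNumber(const unsigned char* data, int* consumed) {
      int result = 0;
      int i = 0;
      while (data[i] & 0x80) {            // Continuation bytes: high bit set.
        result = (result << 7) | (data[i] & 0x7F);
        i++;
      }
      result = (result << 7) | data[i];   // Last byte: high bit clear.
      *consumed = i + 1;
      return result;
    }

For example, WriteNumber(300) stores the two bytes 0x82, 0x2C (300 = 2 << 7 | 44), which the loop reassembles back into 300.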
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index 3a1e99d5d..6a968e3b2 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -35,13 +35,11 @@
namespace v8 {
namespace internal {
-// ----------------------------------------------------------------------------
-// ParserRecorder - Logging of preparser data.
// Abstract interface for preparse data recorder.
class ParserRecorder {
public:
- ParserRecorder() { }
+ ParserRecorder() : should_log_symbols_(false) { }
virtual ~ParserRecorder() { }
// Logs the scope and some details of a function literal in the source.
@@ -49,11 +47,7 @@ class ParserRecorder {
int end,
int literals,
int properties,
- LanguageMode language_mode) = 0;
-
- // Logs a symbol creation of a literal or identifier.
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+ StrictMode strict_mode) = 0;
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
@@ -63,38 +57,121 @@ class ParserRecorder {
const char* message,
const char* argument_opt) = 0;
- virtual int function_position() = 0;
+ // Logs a symbol creation of a literal or identifier.
+ bool ShouldLogSymbols() { return should_log_symbols_; }
+ // The following functions are only callable on CompleteParserRecorder
+ // and are guarded by calls to ShouldLogSymbols.
+ virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal) {
+ UNREACHABLE();
+ }
+ virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal) {
+ UNREACHABLE();
+ }
+ virtual void PauseRecording() { UNREACHABLE(); }
+ virtual void ResumeRecording() { UNREACHABLE(); }
- virtual int symbol_position() = 0;
+ protected:
+ bool should_log_symbols_;
- virtual int symbol_ids() = 0;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
+};
- virtual Vector<unsigned> ExtractData() = 0;
- virtual void PauseRecording() = 0;
+class SingletonLogger : public ParserRecorder {
+ public:
+ SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
+ virtual ~SingletonLogger() { }
- virtual void ResumeRecording() = 0;
-};
+ void Reset() { has_error_ = false; }
+
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ StrictMode strict_mode) {
+ ASSERT(!has_error_);
+ start_ = start;
+ end_ = end;
+ literals_ = literals;
+ properties_ = properties;
+ strict_mode_ = strict_mode;
+ };
+
+ // Logs an error message and marks the log as containing an error.
+ // Further logging will be ignored, and ExtractData will return a vector
+ // representing the error only.
+ virtual void LogMessage(int start,
+ int end,
+ const char* message,
+ const char* argument_opt) {
+ if (has_error_) return;
+ has_error_ = true;
+ start_ = start;
+ end_ = end;
+ message_ = message;
+ argument_opt_ = argument_opt;
+ }
+
+ bool has_error() { return has_error_; }
+
+ int start() { return start_; }
+ int end() { return end_; }
+ int literals() {
+ ASSERT(!has_error_);
+ return literals_;
+ }
+ int properties() {
+ ASSERT(!has_error_);
+ return properties_;
+ }
+ StrictMode strict_mode() {
+ ASSERT(!has_error_);
+ return strict_mode_;
+ }
+ const char* message() {
+ ASSERT(has_error_);
+ return message_;
+ }
+ const char* argument_opt() {
+ ASSERT(has_error_);
+ return argument_opt_;
+ }
+ private:
+ bool has_error_;
+ int start_;
+ int end_;
+ // For function entries.
+ int literals_;
+ int properties_;
+ StrictMode strict_mode_;
+ // For error messages.
+ const char* message_;
+ const char* argument_opt_;
+};
-// ----------------------------------------------------------------------------
-// FunctionLoggingParserRecorder - Record only function entries
-class FunctionLoggingParserRecorder : public ParserRecorder {
+class CompleteParserRecorder : public ParserRecorder {
public:
- FunctionLoggingParserRecorder();
- virtual ~FunctionLoggingParserRecorder() {}
+ struct Key {
+ bool is_one_byte;
+ Vector<const byte> literal_bytes;
+ };
+
+ CompleteParserRecorder();
+ virtual ~CompleteParserRecorder() {}
virtual void LogFunction(int start,
int end,
int literals,
int properties,
- LanguageMode language_mode) {
+ StrictMode strict_mode) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
- function_store_.Add(language_mode);
+ function_store_.Add(strict_mode);
}
// Logs an error message and marks the log as containing an error.
@@ -105,118 +182,44 @@ class FunctionLoggingParserRecorder : public ParserRecorder {
const char* message,
const char* argument_opt);
- virtual int function_position() { return function_store_.size(); }
-
-
- virtual Vector<unsigned> ExtractData() = 0;
-
virtual void PauseRecording() {
- pause_count_++;
- is_recording_ = false;
+ ASSERT(should_log_symbols_);
+ should_log_symbols_ = false;
}
virtual void ResumeRecording() {
- ASSERT(pause_count_ > 0);
- if (--pause_count_ == 0) is_recording_ = !has_error();
+ ASSERT(!should_log_symbols_);
+ should_log_symbols_ = !has_error();
}
- protected:
+ virtual void LogOneByteSymbol(int start, Vector<const uint8_t> literal);
+ virtual void LogTwoByteSymbol(int start, Vector<const uint16_t> literal);
+ Vector<unsigned> ExtractData();
+
+ private:
bool has_error() {
return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
}
- bool is_recording() {
- return is_recording_;
- }
-
void WriteString(Vector<const char> str);
+ // For testing. Defined in test-parsing.cc.
+ friend struct CompleteParserRecorderFriend;
+
+ void LogSymbol(int start,
+ int hash,
+ bool is_one_byte,
+ Vector<const byte> literal);
+
+ // Write a non-negative number to the symbol store.
+ void WriteNumber(int number);
+
Collector<unsigned> function_store_;
unsigned preamble_[PreparseDataConstants::kHeaderSize];
- bool is_recording_;
- int pause_count_;
#ifdef DEBUG
int prev_start_;
#endif
-};
-
-
-// ----------------------------------------------------------------------------
-// PartialParserRecorder - Record only function entries
-
-class PartialParserRecorder : public FunctionLoggingParserRecorder {
- public:
- PartialParserRecorder() : FunctionLoggingParserRecorder() { }
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
- virtual ~PartialParserRecorder() { }
- virtual Vector<unsigned> ExtractData();
- virtual int symbol_position() { return 0; }
- virtual int symbol_ids() { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
-// CompleteParserRecorder - Record both function entries and symbols.
-
-class CompleteParserRecorder: public FunctionLoggingParserRecorder {
- public:
- CompleteParserRecorder();
- virtual ~CompleteParserRecorder() { }
-
- virtual void LogAsciiSymbol(int start, Vector<const char> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
- }
-
- virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
- if (!is_recording_) return;
- int hash = vector_hash(literal);
- LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
- }
-
- virtual Vector<unsigned> ExtractData();
-
- virtual int symbol_position() { return symbol_store_.size(); }
- virtual int symbol_ids() { return symbol_id_; }
-
- private:
- struct Key {
- bool is_ascii;
- Vector<const byte> literal_bytes;
- };
-
- virtual void LogSymbol(int start,
- int hash,
- bool is_ascii,
- Vector<const byte> literal);
-
- template <typename Char>
- static int vector_hash(Vector<const Char> string) {
- int hash = 0;
- for (int i = 0; i < string.length(); i++) {
- int c = static_cast<int>(string[i]);
- hash += c;
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
- }
-
- static bool vector_compare(void* a, void* b) {
- Key* string1 = reinterpret_cast<Key*>(a);
- Key* string2 = reinterpret_cast<Key*>(b);
- if (string1->is_ascii != string2->is_ascii) return false;
- int length = string1->literal_bytes.length();
- if (string2->literal_bytes.length() != length) return false;
- return memcmp(string1->literal_bytes.start(),
- string2->literal_bytes.start(), length) == 0;
- }
-
- // Write a non-negative number to the symbol store.
- void WriteNumber(int number);
Collector<byte> literal_chars_;
Collector<byte> symbol_store_;
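
For orientation: CompleteParserRecorder::LogFunction above appends five unsigned values per preparsed function (start, end, literal count, property count, strict mode) into function_store_, with symbol data collected separately in symbol_store_. Below is a minimal standalone sketch of that per-function layout, using std::vector in place of V8's Collector; the class and names are illustrative only, not V8 code.

#include <cassert>
#include <cstdio>
#include <vector>

// Simplified stand-in for the recorder's function store: every logged
// function contributes exactly five unsigned entries.
class TinyFunctionRecorder {
 public:
  void LogFunction(int start, int end, int literals, int properties,
                   bool is_strict) {
    store_.push_back(start);
    store_.push_back(end);
    store_.push_back(literals);
    store_.push_back(properties);
    store_.push_back(is_strict ? 1 : 0);
  }
  const std::vector<unsigned>& data() const { return store_; }

 private:
  std::vector<unsigned> store_;
};

int main() {
  TinyFunctionRecorder recorder;
  recorder.LogFunction(10, 42, 3, 2, true);
  assert(recorder.data().size() == 5);  // one five-entry group per function
  std::printf("body [%u, %u), strict=%u\n", recorder.data()[0],
              recorder.data()[1], recorder.data()[4]);
  return 0;
}
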
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index fa6f21799..9bcc88002 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -55,14 +55,107 @@ int isfinite(double value);
namespace v8 {
namespace internal {
+
+void PreParserTraits::CheckStrictModeLValue(PreParserExpression expression,
+ bool* ok) {
+ if (expression.IsIdentifier() &&
+ expression.AsIdentifier().IsEvalOrArguments()) {
+ pre_parser_->ReportMessage("strict_eval_arguments",
+ Vector<const char*>::empty());
+ *ok = false;
+ }
+}
+
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error) {
+ ReportMessageAt(location.beg_pos,
+ location.end_pos,
+ message,
+ args.length() > 0 ? args[0] : NULL,
+ is_reference_error);
+}
+
+
+void PreParserTraits::ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error) {
+ pre_parser_->log_
+ ->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
+}
+
+
+void PreParserTraits::ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error) {
+ pre_parser_->log_->LogMessage(start_pos, end_pos, type, name_opt);
+}
+
+
+PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
+ pre_parser_->LogSymbol();
+ if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
+ return PreParserIdentifier::FutureReserved();
+ } else if (scanner->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
+ return PreParserIdentifier::FutureStrictReserved();
+ } else if (scanner->current_token() == Token::YIELD) {
+ return PreParserIdentifier::Yield();
+ }
+ if (scanner->UnescapedLiteralMatches("eval", 4)) {
+ return PreParserIdentifier::Eval();
+ }
+ if (scanner->UnescapedLiteralMatches("arguments", 9)) {
+ return PreParserIdentifier::Arguments();
+ }
+ return PreParserIdentifier::Default();
+}
+
+
+PreParserExpression PreParserTraits::ExpressionFromString(
+ int pos, Scanner* scanner, PreParserFactory* factory) {
+ pre_parser_->LogSymbol();
+ if (scanner->UnescapedLiteralMatches("use strict", 10)) {
+ return PreParserExpression::UseStrictStringLiteral();
+ }
+ return PreParserExpression::StringLiteral();
+}
+
+
+PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
+ return pre_parser_->ParseV8Intrinsic(ok);
+}
+
+
+PreParserExpression PreParserTraits::ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok) {
+ return pre_parser_->ParseFunctionLiteral(
+ name, function_name_location, name_is_strict_reserved, is_generator,
+ function_token_position, type, ok);
+}
+
+
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- LanguageMode mode, bool is_generator, ParserRecorder* log) {
+ StrictMode strict_mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope top_scope(&scope_, kTopLevelScope);
- set_language_mode(mode);
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope top_scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_state(&function_state_, &scope_, &top_scope);
+ scope_->SetStrictMode(strict_mode);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
@@ -72,7 +165,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
ASSERT_EQ(Token::RBRACE, scanner()->peek());
- if (!scope_->is_classic_mode()) {
+ if (scope_->strict_mode() == STRICT) {
int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
}
@@ -139,8 +232,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(allow_harmony_scoping() ?
- EXTENDED_MODE : STRICT_MODE);
+ scope_->SetStrictMode(STRICT);
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
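
The hunk above handles the directive prologue: leading string-literal statements are inspected, a "use strict" literal switches the scope to STRICT, and the first statement that is not a string literal ends the prologue. A standalone sketch of that scan follows, with illustrative stand-in types (not V8's):

#include <cassert>
#include <string>
#include <vector>

enum class Strictness { kSloppy, kStrict };

// Illustrative stand-in for a parsed statement: all we track is whether it is
// a string-literal expression statement and, if so, the literal's text.
struct Stmt {
  bool is_string_literal;
  std::string literal;
};

// Scan the directive prologue: leading string literals are inspected, a
// "use strict" literal switches to strict mode, and the first non-string
// statement ends the prologue.
Strictness ScanDirectivePrologue(const std::vector<Stmt>& body) {
  Strictness mode = Strictness::kSloppy;
  for (const Stmt& stmt : body) {
    if (!stmt.is_string_literal) break;  // prologue is over
    if (stmt.literal == "use strict") mode = Strictness::kStrict;
  }
  return mode;
}

int main() {
  std::vector<Stmt> body = {{true, "use strict"}, {false, ""}};
  assert(ScanDirectivePrologue(body) == Strictness::kStrict);
  return 0;
}
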
@@ -234,9 +326,11 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
Scanner::Location end_location = scanner()->location();
- if (!scope_->is_classic_mode()) {
- ReportMessageAt(start_location.beg_pos, end_location.end_pos,
- "strict_function", NULL);
+ if (strict_mode() == STRICT) {
+ PreParserTraits::ReportMessageAt(start_location.beg_pos,
+ end_location.end_pos,
+ "strict_function",
+ NULL);
*ok = false;
return Statement::Default();
} else {
@@ -260,7 +354,7 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
-
+ int pos = position();
bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Identifier name = ParseIdentifierOrStrictReservedWord(
@@ -269,6 +363,8 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
scanner()->location(),
is_strict_reserved,
is_generator,
+ pos,
+ FunctionLiteral::DECLARATION,
CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -283,7 +379,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (is_extended_mode()) {
+ if (allow_harmony_scoping() && strict_mode() == STRICT) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -343,30 +439,24 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
//
- // However disallowing const in classic mode will break compatibility with
+ // However disallowing const in sloppy mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
- // non-harmony semantics in classic mode.
+ // non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE: {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const", NULL);
- *ok = false;
- return Statement::Default();
- }
- case EXTENDED_MODE:
- if (var_context != kSourceElement &&
- var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_const", NULL);
+ if (strict_mode() == STRICT) {
+ if (allow_harmony_scoping()) {
+ if (var_context != kSourceElement && var_context != kForStatement) {
+ ReportMessageAt(scanner()->peek_location(), "unprotected_const");
*ok = false;
return Statement::Default();
}
require_initializer = true;
- break;
+ } else {
+ Scanner::Location location = scanner()->peek_location();
+ ReportMessageAt(location, "strict_const");
+ *ok = false;
+ return Statement::Default();
+ }
}
} else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
@@ -375,19 +465,17 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
//
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
- if (!is_extended_mode()) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "illegal_let", NULL);
+ //
+ // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
+ if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
+ ReportMessageAt(scanner()->peek_location(), "illegal_let");
*ok = false;
return Statement::Default();
}
Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location.beg_pos, location.end_pos,
- "unprotected_let", NULL);
+ ReportMessageAt(scanner()->peek_location(), "unprotected_let");
*ok = false;
return Statement::Default();
}
@@ -432,7 +520,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Expression is a single identifier, and not, e.g., a parenthesized
// identifier.
ASSERT(!expr.AsIdentifier().IsFutureReserved());
- ASSERT(scope_->is_classic_mode() ||
+ ASSERT(strict_mode() == SLOPPY ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
Consume(Token::COLON);
@@ -530,9 +618,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
- if (!scope_->is_classic_mode()) {
- Scanner::Location location = scanner()->location();
- ReportMessageAt(location, "strict_mode_with", NULL);
+ if (strict_mode() == STRICT) {
+ ReportMessageAt(scanner()->location(), "strict_mode_with");
*ok = false;
return Statement::Default();
}
@@ -540,7 +627,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope::InsideWith iw(scope_);
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseStatement(CHECK_OK);
return Statement::Default();
}
@@ -676,8 +764,7 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
if (scanner()->HasAnyLineTerminatorBeforeNext()) {
- Scanner::Location pos = scanner()->location();
- ReportMessageAt(pos, "newline_after_throw", NULL);
+ ReportMessageAt(scanner()->location(), "newline_after_throw");
*ok = false;
return Statement::Default();
}
@@ -705,7 +792,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
- ReportMessageAt(scanner()->location(), "no_catch_or_finally", NULL);
+ ReportMessageAt(scanner()->location(), "no_catch_or_finally");
*ok = false;
return Statement::Default();
}
@@ -714,7 +801,9 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Expect(Token::LPAREN, CHECK_OK);
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- { Scope::InsideWith iw(scope_);
+ {
+ PreParserScope with_scope(scope_, WITH_SCOPE);
+ BlockState block_state(&scope_, &with_scope);
ParseBlock(CHECK_OK);
}
tok = peek();
@@ -748,561 +837,22 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
#undef DUMMY
-// Precedence = 1
-PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
- // Expression ::
- // AssignmentExpression
- // Expression ',' AssignmentExpression
-
- Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == Token::COMMA) {
- Expect(Token::COMMA, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- return result;
-}
-
-
-// Precedence = 2
-PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
- bool* ok) {
- // AssignmentExpression ::
- // ConditionalExpression
- // YieldExpression
- // LeftHandSideExpression AssignmentOperator AssignmentExpression
-
- if (scope_->is_generator() && peek() == Token::YIELD) {
- return ParseYieldExpression(ok);
- }
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
-
- if (!Token::IsAssignmentOp(peek())) {
- // Parsed conditional expression only (no assignment).
- return expression;
- }
-
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- Token::Value op = Next(); // Get assignment operator.
- ParseAssignmentExpression(accept_IN, CHECK_OK);
-
- if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
- scope_->AddProperty();
- }
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
- // YieldExpression ::
- // 'yield' '*'? AssignmentExpression
- Consume(Token::YIELD);
- Check(Token::MUL);
-
- ParseAssignmentExpression(false, CHECK_OK);
-
- return Expression::Default();
-}
-
-
-// Precedence = 3
-PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
- bool* ok) {
- // ConditionalExpression ::
- // LogicalOrExpression
- // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
-
- // We start using the binary expression parser for prec >= 4 only!
- Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != Token::CONDITIONAL) return expression;
- Consume(Token::CONDITIONAL);
- // In parsing the first assignment expression in conditional
- // expressions we always accept the 'in' keyword; see ECMA-262,
- // section 11.12, page 58.
- ParseAssignmentExpression(true, CHECK_OK);
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(accept_IN, CHECK_OK);
- return Expression::Default();
-}
-
-
-// Precedence >= 4
-PreParser::Expression PreParser::ParseBinaryExpression(int prec,
- bool accept_IN,
- bool* ok) {
- Expression result = ParseUnaryExpression(CHECK_OK);
- for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
- // prec1 >= 4
- while (Precedence(peek(), accept_IN) == prec1) {
- Next();
- ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
- result = Expression::Default();
- }
- }
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
- // UnaryExpression ::
- // PostfixExpression
- // 'delete' UnaryExpression
- // 'void' UnaryExpression
- // 'typeof' UnaryExpression
- // '++' UnaryExpression
- // '--' UnaryExpression
- // '+' UnaryExpression
- // '-' UnaryExpression
- // '~' UnaryExpression
- // '!' UnaryExpression
-
- Token::Value op = peek();
- if (Token::IsUnaryOp(op)) {
- op = Next();
- ParseUnaryExpression(ok);
- return Expression::Default();
- } else if (Token::IsCountOp(op)) {
- op = Next();
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseUnaryExpression(CHECK_OK);
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- }
- return Expression::Default();
- } else {
- return ParsePostfixExpression(ok);
- }
-}
-
-
-PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
- // PostfixExpression ::
- // LeftHandSideExpression ('++' | '--')?
-
- Scanner::Location before = scanner()->peek_location();
- Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
- if (!scope_->is_classic_mode() &&
- expression.IsIdentifier() &&
- expression.AsIdentifier().IsEvalOrArguments()) {
- Scanner::Location after = scanner()->location();
- ReportMessageAt(before.beg_pos, after.end_pos,
- "strict_eval_arguments", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
- }
- return expression;
-}
-
-
-PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
- // LeftHandSideExpression ::
- // (NewExpression | MemberExpression) ...
-
- Expression result = Expression::Default();
- if (peek() == Token::NEW) {
- result = ParseNewExpression(CHECK_OK);
- } else {
- result = ParseMemberExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- case Token::LPAREN: {
- ParseArguments(CHECK_OK);
- result = Expression::Default();
- break;
- }
-
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
-
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
- // NewExpression ::
- // ('new')+ MemberExpression
-
- // The grammar for new expressions is pretty warped. The keyword
- // 'new' can either be a part of the new expression (where it isn't
- // followed by an argument list) or a part of the member expression,
- // where it must be followed by an argument list. To accommodate
- // this, we parse the 'new' keywords greedily and keep track of how
- // many we have parsed. This information is then passed on to the
- // member expression parser, which is only allowed to match argument
- // lists as long as it has 'new' prefixes left
- unsigned new_count = 0;
- do {
- Consume(Token::NEW);
- new_count++;
- } while (peek() == Token::NEW);
-
- return ParseMemberWithNewPrefixesExpression(new_count, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
- return ParseMemberWithNewPrefixesExpression(0, ok);
-}
-
-
-PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
- unsigned new_count, bool* ok) {
- // MemberExpression ::
- // (PrimaryExpression | FunctionLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
-
- // Parse the initial primary or function expression.
- Expression result = Expression::Default();
- if (peek() == Token::FUNCTION) {
- Consume(Token::FUNCTION);
-
- bool is_generator = allow_generators() && Check(Token::MUL);
- Identifier name = Identifier::Default();
- bool is_strict_reserved_name = false;
- Scanner::Location function_name_location = Scanner::Location::invalid();
- if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
- function_name_location = scanner()->location();
- }
- result = ParseFunctionLiteral(name,
- function_name_location,
- is_strict_reserved_name,
- is_generator,
- CHECK_OK);
- } else {
- result = ParsePrimaryExpression(CHECK_OK);
- }
-
- while (true) {
- switch (peek()) {
- case Token::LBRACK: {
- Consume(Token::LBRACK);
- ParseExpression(true, CHECK_OK);
- Expect(Token::RBRACK, CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::PERIOD: {
- Consume(Token::PERIOD);
- ParseIdentifierName(CHECK_OK);
- if (result.IsThis()) {
- result = Expression::ThisProperty();
- } else {
- result = Expression::Default();
- }
- break;
- }
- case Token::LPAREN: {
- if (new_count == 0) return result;
- // Consume one of the new prefixes (already parsed).
- ParseArguments(CHECK_OK);
- new_count--;
- result = Expression::Default();
- break;
- }
- default:
- return result;
- }
- }
-}
-
-
-PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
- // PrimaryExpression ::
- // 'this'
- // 'null'
- // 'true'
- // 'false'
- // Identifier
- // Number
- // String
- // ArrayLiteral
- // ObjectLiteral
- // RegExpLiteral
- // '(' Expression ')'
-
- Expression result = Expression::Default();
- switch (peek()) {
- case Token::THIS: {
- Next();
- result = Expression::This();
- break;
- }
-
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD:
- case Token::YIELD:
- case Token::IDENTIFIER: {
- // Using eval or arguments in this context is OK even in strict mode.
- Identifier id = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- result = Expression::FromIdentifier(id);
- break;
- }
-
- case Token::NULL_LITERAL:
- case Token::TRUE_LITERAL:
- case Token::FALSE_LITERAL:
- case Token::NUMBER: {
- Next();
- break;
- }
- case Token::STRING: {
- Next();
- result = GetStringSymbol();
- break;
- }
-
- case Token::ASSIGN_DIV:
- result = ParseRegExpLiteral(true, CHECK_OK);
- break;
-
- case Token::DIV:
- result = ParseRegExpLiteral(false, CHECK_OK);
- break;
-
- case Token::LBRACK:
- result = ParseArrayLiteral(CHECK_OK);
- break;
-
- case Token::LBRACE:
- result = ParseObjectLiteral(CHECK_OK);
- break;
-
- case Token::LPAREN:
- Consume(Token::LPAREN);
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = ParseExpression(true, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
- break;
-
- case Token::MOD:
- result = ParseV8Intrinsic(CHECK_OK);
- break;
-
- default: {
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return Expression::Default();
- }
- }
-
- return result;
-}
-
-
-PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
- // ArrayLiteral ::
- // '[' Expression? (',' Expression?)* ']'
- Expect(Token::LBRACK, CHECK_OK);
- while (peek() != Token::RBRACK) {
- if (peek() != Token::COMMA) {
- ParseAssignmentExpression(true, CHECK_OK);
- }
- if (peek() != Token::RBRACK) {
- Expect(Token::COMMA, CHECK_OK);
- }
- }
- Expect(Token::RBRACK, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
- // ObjectLiteral ::
- // '{' (
- // ((IdentifierName | String | Number) ':' AssignmentExpression)
- // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
- // )*[','] '}'
-
- ObjectLiteralChecker checker(this, language_mode());
-
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Token::Value next = peek();
- switch (next) {
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- case Token::FUTURE_STRICT_RESERVED_WORD: {
- bool is_getter = false;
- bool is_setter = false;
- ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != Token::COLON) {
- Token::Value name = Next();
- bool is_keyword = Token::IsKeyword(name);
- if (name != Token::IDENTIFIER &&
- name != Token::FUTURE_RESERVED_WORD &&
- name != Token::FUTURE_STRICT_RESERVED_WORD &&
- name != Token::NUMBER &&
- name != Token::STRING &&
- !is_keyword) {
- *ok = false;
- return Expression::Default();
- }
- if (!is_keyword) {
- LogSymbol();
- }
- PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
- checker.CheckProperty(name, type, CHECK_OK);
- ParseFunctionLiteral(Identifier::Default(),
- scanner()->location(),
- false, // reserved words are allowed here
- false, // not a generator
- CHECK_OK);
- if (peek() != Token::RBRACE) {
- Expect(Token::COMMA, CHECK_OK);
- }
- continue; // restart the while
- }
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- }
- case Token::STRING:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- GetStringSymbol();
- break;
- case Token::NUMBER:
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- break;
- default:
- if (Token::IsKeyword(next)) {
- Consume(next);
- checker.CheckProperty(next, kValueProperty, CHECK_OK);
- } else {
- // Unexpected token.
- *ok = false;
- return Expression::Default();
- }
- }
-
- Expect(Token::COLON, CHECK_OK);
- ParseAssignmentExpression(true, CHECK_OK);
-
- // TODO(1240767): Consider allowing trailing comma.
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
- }
- Expect(Token::RBRACE, CHECK_OK);
-
- scope_->NextMaterializedLiteralIndex();
- return Expression::Default();
-}
-
-
-PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
- bool* ok) {
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
- Next();
- ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
- *ok = false;
- return Expression::Default();
- }
-
- scope_->NextMaterializedLiteralIndex();
-
- if (!scanner()->ScanRegExpFlags()) {
- Next();
- ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
- *ok = false;
- return Expression::Default();
- }
- Next();
- return Expression::Default();
-}
-
-
-PreParser::Arguments PreParser::ParseArguments(bool* ok) {
- // Arguments ::
- // '(' (AssignmentExpression)*[','] ')'
-
- Expect(Token::LPAREN, ok);
- if (!*ok) return -1;
- bool done = (peek() == Token::RPAREN);
- int argc = 0;
- while (!done) {
- ParseAssignmentExpression(true, ok);
- if (!*ok) return -1;
- argc++;
- done = (peek() == Token::RPAREN);
- if (!done) {
- Expect(Token::COMMA, ok);
- if (!*ok) return -1;
- }
- }
- Expect(Token::RPAREN, ok);
- return argc;
-}
-
PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name,
Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
ScopeType outer_scope_type = scope_->type();
- bool inside_with = scope_->IsInsideWith();
- Scope function_scope(&scope_, kFunctionScope);
- function_scope.set_is_generator(is_generator);
+ PreParserScope function_scope(scope_, FUNCTION_SCOPE);
+ FunctionState function_state(&function_state_, &scope_, &function_scope);
+ function_state.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -1326,14 +876,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
reserved_error_loc = scanner()->location();
}
- int prev_value;
- if (scanner()->is_literal_ascii()) {
- prev_value =
- duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
- } else {
- prev_value =
- duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
- }
+ int prev_value = scanner()->FindSymbol(&duplicate_finder, 1);
if (!dupe_error_loc.IsValid() && prev_value != 0) {
dupe_error_loc = scanner()->location();
@@ -1346,16 +889,14 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
}
Expect(Token::RPAREN, CHECK_OK);
- // Determine if the function will be lazily compiled.
- // Currently only happens to top-level functions.
- // Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy() &&
- !parenthesized_function_);
+ // See Parser::ParseFunctionLiteral for more information about lazy parsing
+ // and lazy compilation.
+ bool is_lazily_parsed = (outer_scope_type == GLOBAL_SCOPE && allow_lazy() &&
+ !parenthesized_function_);
parenthesized_function_ = false;
Expect(Token::LBRACE, CHECK_OK);
- if (is_lazily_compiled) {
+ if (is_lazily_parsed) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
ParseSourceElements(Token::RBRACE, ok);
@@ -1364,40 +905,35 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Validate strict mode. We can do this only after parsing the function,
// since the function can declare itself strict.
- if (!scope_->is_classic_mode()) {
+ if (strict_mode() == STRICT) {
if (function_name.IsEvalOrArguments()) {
- ReportMessageAt(function_name_location, "strict_eval_arguments", NULL);
+ ReportMessageAt(function_name_location, "strict_eval_arguments");
*ok = false;
return Expression::Default();
}
if (name_is_strict_reserved) {
- ReportMessageAt(
- function_name_location, "unexpected_strict_reserved", NULL);
+ ReportMessageAt(function_name_location, "unexpected_strict_reserved");
*ok = false;
return Expression::Default();
}
if (eval_args_error_loc.IsValid()) {
- ReportMessageAt(eval_args_error_loc, "strict_eval_arguments",
- Vector<const char*>::empty());
+ ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
*ok = false;
return Expression::Default();
}
if (dupe_error_loc.IsValid()) {
- ReportMessageAt(dupe_error_loc, "strict_param_dupe",
- Vector<const char*>::empty());
+ ReportMessageAt(dupe_error_loc, "strict_param_dupe");
*ok = false;
return Expression::Default();
}
if (reserved_error_loc.IsValid()) {
- ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved",
- Vector<const char*>::empty());
+ ReportMessageAt(reserved_error_loc, "unexpected_strict_reserved");
*ok = false;
return Expression::Default();
}
int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
- return Expression::StrictFunction();
}
return Expression::Default();
@@ -1406,18 +942,19 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
int body_start = position();
- log_->PauseRecording();
+ bool is_logging = log_->ShouldLogSymbols();
+ if (is_logging) log_->PauseRecording();
ParseSourceElements(Token::RBRACE, ok);
- log_->ResumeRecording();
+ if (is_logging) log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
ASSERT_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
- scope_->materialized_literal_count(),
- scope_->expected_properties(),
- language_mode());
+ function_state_->materialized_literal_count(),
+ function_state_->expected_property_count(),
+ strict_mode());
}
@@ -1440,166 +977,10 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
void PreParser::LogSymbol() {
- int identifier_pos = position();
- if (scanner()->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
- } else {
- log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
+ if (log_->ShouldLogSymbols()) {
+ scanner()->LogSymbol(log_, position());
}
}
-PreParser::Expression PreParser::GetStringSymbol() {
- const int kUseStrictLength = 10;
- const char* kUseStrictChars = "use strict";
- LogSymbol();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == kUseStrictLength &&
- !scanner()->literal_contains_escapes() &&
- !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
- kUseStrictLength)) {
- return Expression::UseStrictStringLiteral();
- }
- return Expression::StringLiteral();
-}
-
-
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
- LogSymbol();
- if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
- return Identifier::FutureReserved();
- } else if (scanner()->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return Identifier::FutureStrictReserved();
- } else if (scanner()->current_token() == Token::YIELD) {
- return Identifier::Yield();
- }
- if (scanner()->is_literal_ascii()) {
- // Detect strict-mode poison words.
- if (scanner()->literal_length() == 4 &&
- !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
- return Identifier::Eval();
- }
- if (scanner()->literal_length() == 9 &&
- !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
- return Identifier::Arguments();
- }
- }
- return Identifier::Default();
-}
-
-
-// Parses an identifier that is valid for the current scope, in particular it
-// fails on strict mode future reserved keywords in a strict scope. If
-// allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
-// "arguments" as identifier even in strict mode (this is needed in cases like
-// "var foo = eval;").
-PreParser::Identifier PreParser::ParseIdentifier(
- AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
- bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- PreParser::Identifier name = GetIdentifierSymbol();
- if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
- !scope_->is_classic_mode() && name.IsEvalOrArguments()) {
- ReportMessageAt(scanner()->location(), "strict_eval_arguments", NULL);
- *ok = false;
- }
- return name;
- } else if (scope_->is_classic_mode() &&
- (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !scope_->is_generator()))) {
- return GetIdentifierSymbol();
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
-}
-
-
-// Parses and identifier or a strict mode future reserved word, and indicate
-// whether it is strict mode future reserved.
-PreParser::Identifier PreParser::ParseIdentifierOrStrictReservedWord(
- bool* is_strict_reserved, bool* ok) {
- Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
- *is_strict_reserved = false;
- } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
- (next == Token::YIELD && !scope_->is_generator())) {
- *is_strict_reserved = true;
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
- return GetIdentifierSymbol();
-}
-
-
-PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- Token::Value next = Next();
- if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return Identifier::Default();
- }
- return GetIdentifierSymbol();
-}
-
-#undef CHECK_OK
-
-
-// This function reads an identifier and determines whether or not it
-// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifierName(ok);
- if (!*ok) return Identifier::Default();
- if (scanner()->is_literal_ascii() &&
- scanner()->literal_length() == 3) {
- const char* token = scanner()->literal_ascii_string().start();
- *is_get = strncmp(token, "get", 3) == 0;
- *is_set = !*is_get && strncmp(token, "set", 3) == 0;
- }
- return result;
-}
-
-
-void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
- PropertyKind type,
- bool* ok) {
- int old;
- if (property == Token::NUMBER) {
- old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
- } else if (scanner()->is_literal_ascii()) {
- old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
- } else {
- old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
- }
- PropertyKind old_type = static_cast<PropertyKind>(old);
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (language_mode_ == CLASSIC_MODE) return;
- parser()->ReportMessageAt(scanner()->location(),
- "strict_duplicate_property");
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_data_property");
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- parser()->ReportMessageAt(scanner()->location(),
- "accessor_get_set");
- }
- *ok = false;
- }
-}
-
} } // v8::internal
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index bcaab743e..080b77287 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -28,26 +28,84 @@
#ifndef V8_PREPARSER_H
#define V8_PREPARSER_H
+#include "func-name-inferrer.h"
#include "hashmap.h"
+#include "scopes.h"
#include "token.h"
#include "scanner.h"
+#include "v8.h"
namespace v8 {
namespace internal {
-// Common base class shared between parser and pre-parser.
-class ParserBase {
+// Common base class shared between parser and pre-parser. Traits encapsulate
+// the differences between Parser and PreParser:
+
+// - Return types: For example, Parser functions return Expression* and
+// PreParser functions return PreParserExpression.
+
+// - Creating parse tree nodes: Parser generates an AST during the recursive
+// descent. PreParser doesn't create a tree. Instead, it passes around minimal
+// data objects (PreParserExpression, PreParserIdentifier etc.) which contain
+// just enough data for the upper layer functions. PreParserFactory is
+// responsible for creating these dummy objects. It provides a similar kind of
+// interface as AstNodeFactory, so ParserBase doesn't need to care which one is
+// used.
+
+// - Miscellaneous other tasks interleaved with the recursive descent. For
+// example, Parser keeps track of which function literals should be marked as
+// pretenured, and PreParser doesn't care.
+
+// The traits are expected to contain the following typedefs:
+// struct Traits {
+// // In particular...
+// struct Type {
+// // Used by FunctionState and BlockState.
+// typedef Scope;
+// typedef GeneratorVariable;
+// typedef Zone;
+// // Return types for traversing functions.
+// typedef Identifier;
+// typedef Expression;
+// typedef FunctionLiteral;
+// typedef ObjectLiteralProperty;
+// typedef Literal;
+// typedef ExpressionList;
+// typedef PropertyList;
+// // For constructing objects returned by the traversing functions.
+// typedef Factory;
+// };
+// // ...
+// };
+
+template <typename Traits>
+class ParserBase : public Traits {
public:
- ParserBase(Scanner* scanner, uintptr_t stack_limit)
- : scanner_(scanner),
+ // Shorten type names defined by Traits.
+ typedef typename Traits::Type::Expression ExpressionT;
+ typedef typename Traits::Type::Identifier IdentifierT;
+
+ ParserBase(Scanner* scanner, uintptr_t stack_limit,
+ v8::Extension* extension,
+ ParserRecorder* log,
+ typename Traits::Type::Zone* zone,
+ typename Traits::Type::Parser this_object)
+ : Traits(this_object),
+ parenthesized_function_(false),
+ scope_(NULL),
+ function_state_(NULL),
+ extension_(extension),
+ fni_(NULL),
+ log_(log),
+ mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ scanner_(scanner),
stack_limit_(stack_limit),
stack_overflow_(false),
allow_lazy_(false),
allow_natives_syntax_(false),
allow_generators_(false),
- allow_for_of_(false) { }
- // TODO(mstarzinger): Only virtual until message reporting has been unified.
- virtual ~ParserBase() { }
+ allow_for_of_(false),
+ zone_(zone) { }
// Getters that indicate whether certain syntactical constructs are
// allowed to be parsed by this instance of the parser.
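
The comment block above describes the new shape of ParserBase: it is a template over a Traits class and also derives from it, so the shared recursive-descent code can use Parser- or PreParser-specific return types and node construction without virtual dispatch. A minimal sketch of that pattern with invented stand-in classes (not V8's actual types):

#include <iostream>
#include <string>

// Shared recursive-descent skeleton: parameterized over a Traits class that
// supplies the expression type and the node-construction behaviour.
template <typename Traits>
class BaseParser : public Traits {
 public:
  typedef typename Traits::Type::Expression ExpressionT;

  ExpressionT ParseLiteral() {
    // Common control flow lives here; node creation is delegated to Traits.
    return this->MakeLiteral("42");
  }
};

// "Full parser" traits: builds real (here, string-based) nodes.
struct FullTraits {
  struct Type { typedef std::string Expression; };
  std::string MakeLiteral(const char* text) { return std::string(text); }
};

// "Pre-parser" traits: builds nothing and returns a dummy value.
struct PreTraits {
  struct Type { typedef int Expression; };
  int MakeLiteral(const char*) { return 0; }
};

int main() {
  BaseParser<FullTraits> parser;
  BaseParser<PreTraits> preparser;
  std::cout << parser.ParseLiteral() << "\n";     // prints 42
  std::cout << preparser.ParseLiteral() << "\n";  // prints 0
  return 0;
}
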
@@ -81,13 +139,125 @@ class ParserBase {
kDontAllowEvalOrArguments
};
+ enum Mode {
+ PARSE_LAZILY,
+ PARSE_EAGERLY
+ };
+
+ // ---------------------------------------------------------------------------
+ // FunctionState and BlockState together implement the parser's scope stack.
+ // The parser's current scope is in scope_. BlockState and FunctionState
+ // constructors push on the scope stack and the destructors pop. They are also
+ // used to hold the parser's per-function and per-block state.
+ class BlockState BASE_EMBEDDED {
+ public:
+ BlockState(typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope)
+ : scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ scope_(scope) {
+ *scope_stack_ = scope_;
+ }
+ ~BlockState() { *scope_stack_ = outer_scope_; }
+
+ private:
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ typename Traits::Type::Scope* scope_;
+ };
+
+ class FunctionState BASE_EMBEDDED {
+ public:
+ FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* zone = NULL);
+ ~FunctionState();
+
+ int NextMaterializedLiteralIndex() {
+ return next_materialized_literal_index_++;
+ }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
+
+ int NextHandlerIndex() { return next_handler_index_++; }
+ int handler_count() { return next_handler_index_; }
+
+ void AddProperty() { expected_property_count_++; }
+ int expected_property_count() { return expected_property_count_; }
+
+ void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
+ bool is_generator() const { return is_generator_; }
+
+ void set_generator_object_variable(
+ typename Traits::Type::GeneratorVariable* variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ is_generator_ = true;
+ }
+ typename Traits::Type::GeneratorVariable* generator_object_variable()
+ const {
+ return generator_object_variable_;
+ }
+
+ typename Traits::Type::Factory* factory() { return &factory_; }
+
+ private:
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
+
+ // Used to assign a per-function index to try and catch handlers.
+ int next_handler_index_;
+
+ // Properties count estimation.
+ int expected_property_count_;
+
+ // Whether the function is a generator.
+ bool is_generator_;
+ // For generators, this variable may hold the generator object. This variable
+ // is used by yield expressions and return statements. It is not necessary
+ // for generator functions to have this variable set.
+ Variable* generator_object_variable_;
+
+ FunctionState** function_state_stack_;
+ FunctionState* outer_function_state_;
+ typename Traits::Type::Scope** scope_stack_;
+ typename Traits::Type::Scope* outer_scope_;
+ Isolate* isolate_; // Only used by ParserTraits.
+ int saved_ast_node_id_; // Only used by ParserTraits.
+ typename Traits::Type::Factory factory_;
+
+ friend class ParserTraits;
+ };
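
As the comment preceding BlockState explains, BlockState and FunctionState implement the scope stack via RAII: the constructor installs a new current scope through the parser's scope pointer and the destructor restores the outer one. A standalone sketch of that push/pop pattern, with simplified stand-in types:

#include <cassert>

struct Scope {
  explicit Scope(Scope* outer) : outer_(outer) {}
  Scope* outer_;
};

// RAII helper mirroring BlockState: entering a block installs a new current
// scope; leaving the block (destructor) restores the previous one.
class ScopeState {
 public:
  ScopeState(Scope** scope_stack, Scope* scope)
      : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
    *scope_stack_ = scope;
  }
  ~ScopeState() { *scope_stack_ = outer_scope_; }

 private:
  Scope** scope_stack_;
  Scope* outer_scope_;
};

int main() {
  Scope* current = nullptr;
  Scope global(nullptr);
  ScopeState enter_global(&current, &global);
  assert(current == &global);
  {
    Scope block(current);
    ScopeState enter_block(&current, &block);
    assert(current == &block);  // inner scope active inside the braces
  }
  assert(current == &global);   // restored when ScopeState is destroyed
  return 0;
}
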
+
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(ParserBase* parser, Mode mode)
+ : parser_(parser),
+ old_mode_(parser->mode()) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() {
+ parser_->mode_ = old_mode_;
+ }
+
+ private:
+ ParserBase* parser_;
+ Mode old_mode_;
+ };
+
Scanner* scanner() const { return scanner_; }
int position() { return scanner_->location().beg_pos; }
int peek_position() { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
-
- virtual bool is_classic_mode() = 0;
+ Mode mode() const { return mode_; }
+ typename Traits::Type::Zone* zone() const { return zone_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -132,25 +302,128 @@ class ParserBase {
}
}
- bool peek_any_identifier();
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+ void ExpectSemicolon(bool* ok) {
+ // Check for automatic semicolon insertion according to
+ // the rules given in ECMA-262, section 7.9, page 21.
+ Token::Value tok = peek();
+ if (tok == Token::SEMICOLON) {
+ Next();
+ return;
+ }
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ return;
+ }
+ Expect(Token::SEMICOLON, ok);
+ }
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+ bool peek_any_identifier() {
+ Token::Value next = peek();
+ return next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::YIELD;
+ }
+
+ bool CheckContextualKeyword(Vector<const char> keyword) {
+ if (peek() == Token::IDENTIFIER &&
+ scanner()->is_next_contextual_keyword(keyword)) {
+ Consume(Token::IDENTIFIER);
+ return true;
+ }
+ return false;
+ }
+
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+ Expect(Token::IDENTIFIER, ok);
+ if (!*ok) return;
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
+ *ok = false;
+ }
+ }
+
+ // Checks whether an octal literal was last seen between beg_pos and end_pos.
+ // If so, reports an error. Only called for strict mode.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos &&
+ octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
+ *ok = false;
+ }
+ }
// Determine precedence of given token.
- static int Precedence(Token::Value token, bool accept_IN);
+ static int Precedence(Token::Value token, bool accept_IN) {
+ if (token == Token::IN && !accept_IN)
+ return 0; // 0 precedence will terminate binary expression parsing
+ return Token::Precedence(token);
+ }
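
Precedence() above returns 0 for 'in' when it is not accepted, which stops binary-expression parsing because the precedence-climbing loop (visible in the removed ParseBinaryExpression earlier in this diff) only continues while the next operator's precedence is at least the current minimum. A tiny self-contained sketch of that loop, evaluating single-digit arithmetic purely for illustration:

#include <cassert>
#include <string>

// Minimal precedence climbing over '+' (precedence 1) and '*' (precedence 2),
// mirroring the shape of ParseBinaryExpression: parse a primary expression,
// then keep consuming operators whose precedence is >= the current minimum.
struct TinyParser {
  std::string src;
  size_t pos;

  char Peek() const { return pos < src.size() ? src[pos] : '\0'; }

  static int Precedence(char op) {
    if (op == '+') return 1;
    if (op == '*') return 2;
    return 0;  // 0 terminates binary-expression parsing, as in the code above
  }

  int ParsePrimary() { return src[pos++] - '0'; }  // single-digit operands

  int ParseBinary(int min_prec) {
    int left = ParsePrimary();
    while (Precedence(Peek()) >= min_prec && Precedence(Peek()) > 0) {
      char op = src[pos++];
      int right = ParseBinary(Precedence(op) + 1);
      left = (op == '+') ? left + right : left * right;
    }
    return left;
  }
};

int main() {
  TinyParser parser;
  parser.src = "1+2*3";
  parser.pos = 0;
  assert(parser.ParseBinary(1) == 7);  // '*' binds tighter than '+'
  return 0;
}
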
+
+ typename Traits::Type::Factory* factory() {
+ return function_state_->factory();
+ }
+
+ StrictMode strict_mode() { return scope_->strict_mode(); }
+ bool is_generator() const { return function_state_->is_generator(); }
// Report syntax errors.
- void ReportUnexpectedToken(Token::Value token);
- void ReportMessageAt(Scanner::Location location, const char* type) {
- ReportMessageAt(location, type, Vector<const char*>::empty());
+ void ReportMessage(const char* message, Vector<const char*> args,
+ bool is_reference_error = false) {
+ Scanner::Location source_location = scanner()->location();
+ Traits::ReportMessageAt(source_location, message, args, is_reference_error);
}
- virtual void ReportMessageAt(Scanner::Location source_location,
- const char* message,
- Vector<const char*> args) = 0;
+
+ void ReportMessageAt(Scanner::Location location, const char* message,
+ bool is_reference_error = false) {
+ Traits::ReportMessageAt(location, message, Vector<const char*>::empty(),
+ is_reference_error);
+ }
+
+ void ReportUnexpectedToken(Token::Value token);
+
+ // Recursive descent functions:
+
+ // Parses an identifier that is valid for the current scope, in particular it
+ // fails on strict mode future reserved keywords in a strict scope. If
+ // allow_eval_or_arguments is kAllowEvalOrArguments, we allow "eval" or
+ // "arguments" as identifier even in strict mode (this is needed in cases like
+ // "var foo = eval;").
+ IdentifierT ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier,
+ bool* ok);
+ // Parses an identifier or a strict mode future reserved word, and indicates
+ // whether it is strict mode future reserved.
+ IdentifierT ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved,
+ bool* ok);
+ IdentifierT ParseIdentifierName(bool* ok);
+ // Parses an identifier and determines whether or not it is 'get' or 'set'.
+ IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
+
+ ExpressionT ParseRegExpLiteral(bool seen_equal, bool* ok);
+
+ ExpressionT ParsePrimaryExpression(bool* ok);
+ ExpressionT ParseExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseArrayLiteral(bool* ok);
+ ExpressionT ParseObjectLiteral(bool* ok);
+ typename Traits::Type::ExpressionList ParseArguments(bool* ok);
+ ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseYieldExpression(bool* ok);
+ ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
+ ExpressionT ParseUnaryExpression(bool* ok);
+ ExpressionT ParsePostfixExpression(bool* ok);
+ ExpressionT ParseLeftHandSideExpression(bool* ok);
+ ExpressionT ParseMemberWithNewPrefixesExpression(bool* ok);
+ ExpressionT ParseMemberExpression(bool* ok);
+ ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok);
// Used to detect duplicates in object literals. Each of the values
// kGetterProperty, kSetterProperty and kValueProperty represents
@@ -176,10 +449,10 @@ class ParserBase {
// Validation per ECMA 262 - 11.1.5 "Object Initialiser".
class ObjectLiteralChecker {
public:
- ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
: parser_(parser),
finder_(scanner()->unicode_cache()),
- language_mode_(mode) { }
+ strict_mode_(strict_mode) { }
void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
@@ -203,9 +476,22 @@ class ParserBase {
ParserBase* parser_;
DuplicateFinder finder_;
- LanguageMode language_mode_;
+ StrictMode strict_mode_;
};
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis.
+ // Heuristically that means that the function will be called immediately,
+ // so never lazily compile it.
+ bool parenthesized_function_;
+
+ typename Traits::Type::Scope* scope_; // Scope stack.
+ FunctionState* function_state_; // Function state stack.
+ v8::Extension* extension_;
+ FuncNameInferrer* fni_;
+ ParserRecorder* log_;
+ Mode mode_;
+
private:
Scanner* scanner_;
uintptr_t stack_limit_;
@@ -215,6 +501,490 @@ class ParserBase {
bool allow_natives_syntax_;
bool allow_generators_;
bool allow_for_of_;
+
+ typename Traits::Type::Zone* zone_; // Only used by Parser.
+};
+
+
+class PreParserIdentifier {
+ public:
+ PreParserIdentifier() : type_(kUnknownIdentifier) {}
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ bool IsEval() { return type_ == kEvalIdentifier; }
+ bool IsArguments() { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+ bool IsYield() { return type_ == kYieldIdentifier; }
+ bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
+ bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier
+ };
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+// Bits 0 and 1 are used to identify the type of expression:
+// If bit 0 is set, it's an identifier.
+// If bit 1 is set, it's a string literal.
+// If neither is set, it's no particular type, and having both set isn't
+// used yet.
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(kUnknownExpression);
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(kIdentifierFlag |
+ (id.type_ << kIdentifierShift));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(kUnknownStringLiteral);
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(kUseStrictString);
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(kThisExpression);
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(kThisPropertyExpression);
+ }
+
+ static PreParserExpression Property() {
+ return PreParserExpression(kPropertyExpression);
+ }
+
+ bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
+
+ // Only works correctly if it is actually an identifier expression.
+ PreParserIdentifier AsIdentifier() {
+ return PreParserIdentifier(
+ static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
+ }
+
+ bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+ bool IsUseStrictLiteral() {
+ return (code_ & kStringLiteralMask) == kUseStrictString;
+ }
+
+ bool IsThis() { return code_ == kThisExpression; }
+
+ bool IsThisProperty() { return code_ == kThisPropertyExpression; }
+
+ bool IsProperty() {
+ return code_ == kPropertyExpression || code_ == kThisPropertyExpression;
+ }
+
+ bool IsValidLeftHandSide() {
+ return IsIdentifier() || IsProperty();
+ }
+
+ // At the moment PreParser doesn't track these expression types.
+ bool IsFunctionLiteral() const { return false; }
+ bool IsCall() const { return false; }
+ bool IsCallNew() const { return false; }
+
+ PreParserExpression AsFunctionLiteral() { return *this; }
+
+ // Dummy implementation for making expression->somefunc() work in both Parser
+ // and PreParser.
+ PreParserExpression* operator->() { return this; }
+
+ // More dummy implementations of things PreParser doesn't need to track:
+ void set_index(int index) {} // For YieldExpressions
+ void set_parenthesized() {}
+
+ private:
+ // Least significant 2 bits are used as flags. Bits 0 and 1 represent
+ // identifiers or strings literals, and are mutually exclusive, but can both
+ // be absent. If the expression is an identifier or a string literal, the
+ // other bits describe the type (see PreParserIdentifier::Type and string
+ // literal constants below).
+ enum {
+ kUnknownExpression = 0,
+ // Identifiers
+ kIdentifierFlag = 1, // Used to detect labels.
+ kIdentifierShift = 3,
+
+ kStringLiteralFlag = 2, // Used to detect directive prologue.
+ kUnknownStringLiteral = kStringLiteralFlag,
+ kUseStrictString = kStringLiteralFlag | 8,
+ kStringLiteralMask = kUseStrictString,
+
+ // Below here applies if neither identifier nor string literal. Reserve the
+ // 2 least significant bits for flags.
+ kThisExpression = 1 << 2,
+ kThisPropertyExpression = 2 << 2,
+ kPropertyExpression = 3 << 2
+ };
+
+ explicit PreParserExpression(int expression_code) : code_(expression_code) {}
+
+ int code_;
+};
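
The comments above describe how PreParserExpression packs its kind into an int: bit 0 flags identifiers, bit 1 flags string literals, and for identifiers the PreParserIdentifier type is stored from bit 3 upward, so shifting right by kIdentifierShift recovers it. A standalone sketch of that round trip using the same constants (simplified, outside the real classes):

#include <cassert>

// Same layout idea as PreParserExpression: bit 0 = identifier flag, bit 1 =
// string literal flag, identifier type stored starting at bit 3.
enum IdentifierType {
  kUnknownIdentifier = 0,
  kEvalIdentifier = 4,
  kArgumentsIdentifier = 5
};

const int kIdentifierFlag = 1;
const int kIdentifierShift = 3;

int FromIdentifier(IdentifierType type) {
  return kIdentifierFlag | (type << kIdentifierShift);
}

bool IsIdentifier(int code) { return (code & kIdentifierFlag) != 0; }

IdentifierType AsIdentifier(int code) {
  return static_cast<IdentifierType>(code >> kIdentifierShift);
}

int main() {
  int code = FromIdentifier(kEvalIdentifier);
  assert(IsIdentifier(code));
  assert(AsIdentifier(code) == kEvalIdentifier);  // round trip preserves type
  return 0;
}
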
+
+
+// PreParserExpressionList doesn't actually store the expressions because
+// PreParser doesn't need to.
+class PreParserExpressionList {
+ public:
+ // These functions make list->Add(some_expression) work (and do nothing).
+ PreParserExpressionList() : length_(0) {}
+ PreParserExpressionList* operator->() { return this; }
+ void Add(PreParserExpression, void*) { ++length_; }
+ int length() const { return length_; }
+ private:
+ int length_;
+};
+
+
+class PreParserScope {
+ public:
+ explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
+ : scope_type_(scope_type) {
+ strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY;
+ }
+
+ ScopeType type() { return scope_type_; }
+ StrictMode strict_mode() const { return strict_mode_; }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+
+ private:
+ ScopeType scope_type_;
+ StrictMode strict_mode_;
+};
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* extra_param) {}
+ PreParserExpression NewLiteral(PreParserIdentifier identifier,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewNumberLiteral(double number,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ PreParserIdentifier js_flags,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int literal_index,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(bool is_getter,
+ PreParserExpression value,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
+ int literal_index,
+ int boilerplate_properties,
+ bool has_function,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewVariableProxy(void* generator_variable) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewProperty(PreParserExpression obj,
+ PreParserExpression key,
+ int pos) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisProperty();
+ }
+ return PreParserExpression::Property();
+ }
+ PreParserExpression NewUnaryOperation(Token::Value op,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewBinaryOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCompareOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewAssignment(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewYield(PreParserExpression generator_object,
+ PreParserExpression expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewConditional(PreParserExpression condition,
+ PreParserExpression then_expression,
+ PreParserExpression else_expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCountOperation(Token::Value op,
+ bool is_prefix,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCall(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCallNew(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef PreParser* Parser;
+
+ // Used by FunctionState and BlockState.
+ typedef PreParserScope Scope;
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+ // No interaction with Zones.
+ typedef void Zone;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression YieldExpression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression Literal;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList PropertyList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Custom operations executed when FunctionStates are created and
+ // destructed. (The PreParser doesn't need to do anything.)
+ template<typename FunctionState>
+ static void SetUpFunctionState(FunctionState* function_state, void*) {}
+ template<typename FunctionState>
+ static void TearDownFunctionState(FunctionState* function_state) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(PreParserExpression expression) {
+ return expression.IsThisProperty();
+ }
+
+ static bool IsIdentifier(PreParserExpression expression) {
+ return expression.IsIdentifier();
+ }
+
+ static bool IsBoilerplateProperty(PreParserExpression property) {
+ // PreParser doesn't count boilerplate properties.
+ return false;
+ }
+
+ static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ return false;
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+ static void PushPropertyName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ PreParserScope* scope, PreParserExpression value, bool* has_function) {}
+
+ static void CheckAssigningFunctionLiteralToProperty(
+ PreParserExpression left, PreParserExpression right) {}
+
+ // PreParser doesn't need to keep track of eval calls.
+ static void CheckPossibleEvalCall(PreParserExpression expression,
+ PreParserScope* scope) {}
+
+ static PreParserExpression MarkExpressionAsLValue(
+ PreParserExpression expression) {
+ // TODO(marja): To be able to produce the same errors, the preparser needs
+ // to start tracking which expressions are variables and which are lvalues.
+ return expression;
+ }
+
+ // Checks LHS expression for assignment and prefix/postfix increment/decrement
+ // in strict mode.
+ void CheckStrictModeLValue(PreParserExpression expression, bool* ok);
+
+ bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos,
+ PreParserFactory* factory) {
+ return false;
+ }
+
+ PreParserExpression BuildUnaryExpression(PreParserExpression expression,
+ Token::Value op, int pos,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ const char* message,
+ Vector<const char*> args,
+ bool is_reference_error = false);
+ void ReportMessageAt(Scanner::Location location,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error = false);
+ void ReportMessageAt(int start_pos,
+ int end_pos,
+ const char* type,
+ const char* name_opt,
+ bool is_reference_error = false);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList();
+ }
+
+ // Odd-ball literal creators.
+ static PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ static PreParserIdentifier NextLiteralString(Scanner* scanner,
+ PretenureFlag tenured) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int pos, PreParserScope* scope,
+ PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ static PreParserExpressionList NewExpressionList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ static PreParserExpressionList NewPropertyList(int size, void* zone) {
+ return PreParserExpressionList();
+ }
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+ PreParserExpression ParseFunctionLiteral(
+ PreParserIdentifier name,
+ Scanner::Location function_name_location,
+ bool name_is_strict_reserved,
+ bool is_generator,
+ int function_token_position,
+ FunctionLiteral::FunctionType type,
+ bool* ok);
+
+ private:
+ PreParser* pre_parser_;
};
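The Type typedefs above are what let a single ParserBase implementation serve both the parser and the preparser. A minimal standalone sketch of the pattern, with simplified names that are not the actual V8 classes:

#include <cstddef>

template <class Traits>
class ParserBaseSketch {
 public:
  typedef typename Traits::ExpressionT ExpressionT;
  ExpressionT ParsePrimary() {
    // The recursive-descent logic is shared; only the produced value type
    // differs, selected through the Traits typedefs.
    return Traits::EmptyExpression();
  }
};

struct PreParserLikeTraits {
  typedef int ExpressionT;  // the preparser returns cheap tagged values
  static ExpressionT EmptyExpression() { return 0; }
};

struct ParserLikeTraits {
  struct AstNode {};
  typedef AstNode* ExpressionT;  // the full parser returns real AST nodes
  static ExpressionT EmptyExpression() { return NULL; }
};

int main() {
  ParserBaseSketch<PreParserLikeTraits> pre;
  ParserBaseSketch<ParserLikeTraits> full;
  int marker = pre.ParsePrimary();                         // an int "expression"
  ParserLikeTraits::AstNode* node = full.ParsePrimary();   // an AST pointer
  return (marker == 0 && node == NULL) ? 0 : 1;
}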
@@ -230,36 +1000,34 @@ class ParserBase {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase {
+class PreParser : public ParserBase<PreParserTraits> {
public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
- PreParser(Scanner* scanner,
- ParserRecorder* log,
- uintptr_t stack_limit)
- : ParserBase(scanner, stack_limit),
- log_(log),
- scope_(NULL),
- parenthesized_function_(false) { }
-
- ~PreParser() {}
+ PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
+ this) {}
 // Pre-parse the program from the character stream; returns kPreParseSuccess
 // on success (even if parsing failed, the pre-parse data successfully
 // captured the syntax error), and kPreParseStackOverflow if a stack
 // overflow occurred during parsing.
PreParseResult PreParseProgram() {
- Scope top_scope(&scope_, kTopLevelScope);
+ PreParserScope scope(scope_, GLOBAL_SCOPE);
+ FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
ParseSourceElements(Token::EOS, &ok);
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
- } else if (!scope_->is_classic_mode()) {
+ } else if (scope_->strict_mode() == STRICT) {
CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
}
return kPreParseSuccess;
@@ -273,21 +1041,18 @@ class PreParser : public ParserBase {
// keyword and parameters, and have consumed the initial '{'.
 // At return, unless an error occurred, the scanner is positioned before
 // the final '}'.
- PreParseResult PreParseLazyFunction(LanguageMode mode,
+ PreParseResult PreParseLazyFunction(StrictMode strict_mode,
bool is_generator,
ParserRecorder* log);
private:
+ friend class PreParserTraits;
+
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
 // are either being counted in the preparser data, or are important
// to throw the correct syntax error exceptions.
- enum ScopeType {
- kTopLevelScope,
- kFunctionScope
- };
-
enum VariableDeclarationContext {
kSourceElement,
kStatement,
@@ -300,142 +1065,6 @@ class PreParser : public ParserBase {
kHasNoInitializers
};
- class Expression;
-
- class Identifier {
- public:
- static Identifier Default() {
- return Identifier(kUnknownIdentifier);
- }
- static Identifier Eval() {
- return Identifier(kEvalIdentifier);
- }
- static Identifier Arguments() {
- return Identifier(kArgumentsIdentifier);
- }
- static Identifier FutureReserved() {
- return Identifier(kFutureReservedIdentifier);
- }
- static Identifier FutureStrictReserved() {
- return Identifier(kFutureStrictReservedIdentifier);
- }
- static Identifier Yield() {
- return Identifier(kYieldIdentifier);
- }
- bool IsEval() { return type_ == kEvalIdentifier; }
- bool IsArguments() { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
- bool IsYield() { return type_ == kYieldIdentifier; }
- bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() {
- return type_ == kFutureStrictReservedIdentifier;
- }
- bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier
- };
- explicit Identifier(Type type) : type_(type) { }
- Type type_;
-
- friend class Expression;
- };
-
- // Bits 0 and 1 are used to identify the type of expression:
- // If bit 0 is set, it's an identifier.
- // if bit 1 is set, it's a string literal.
- // If neither is set, it's no particular type, and both set isn't
- // use yet.
- class Expression {
- public:
- static Expression Default() {
- return Expression(kUnknownExpression);
- }
-
- static Expression FromIdentifier(Identifier id) {
- return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
- }
-
- static Expression StringLiteral() {
- return Expression(kUnknownStringLiteral);
- }
-
- static Expression UseStrictStringLiteral() {
- return Expression(kUseStrictString);
- }
-
- static Expression This() {
- return Expression(kThisExpression);
- }
-
- static Expression ThisProperty() {
- return Expression(kThisPropertyExpression);
- }
-
- static Expression StrictFunction() {
- return Expression(kStrictFunctionExpression);
- }
-
- bool IsIdentifier() {
- return (code_ & kIdentifierFlag) != 0;
- }
-
- // Only works corretly if it is actually an identifier expression.
- PreParser::Identifier AsIdentifier() {
- return PreParser::Identifier(
- static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
- }
-
- bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
- bool IsUseStrictLiteral() {
- return (code_ & kStringLiteralMask) == kUseStrictString;
- }
-
- bool IsThis() {
- return code_ == kThisExpression;
- }
-
- bool IsThisProperty() {
- return code_ == kThisPropertyExpression;
- }
-
- bool IsStrictFunction() {
- return code_ == kStrictFunctionExpression;
- }
-
- private:
- // First two/three bits are used as flags.
- // Bit 0 and 1 represent identifiers or strings literals, and are
- // mutually exclusive, but can both be absent.
- enum {
- kUnknownExpression = 0,
- // Identifiers
- kIdentifierFlag = 1, // Used to detect labels.
- kIdentifierShift = 3,
-
- kStringLiteralFlag = 2, // Used to detect directive prologue.
- kUnknownStringLiteral = kStringLiteralFlag,
- kUseStrictString = kStringLiteralFlag | 8,
- kStringLiteralMask = kUseStrictString,
-
- // Below here applies if neither identifier nor string literal.
- kThisExpression = 4,
- kThisPropertyExpression = 8,
- kStrictFunctionExpression = 12
- };
-
- explicit Expression(int expression_code) : code_(expression_code) { }
-
- int code_;
- };
-
class Statement {
public:
static Statement Default() {
@@ -487,86 +1116,6 @@ class PreParser : public ParserBase {
kUnknownSourceElements
};
- typedef int Arguments;
-
- class Scope {
- public:
- Scope(Scope** variable, ScopeType type)
- : variable_(variable),
- prev_(*variable),
- type_(type),
- materialized_literal_count_(0),
- expected_properties_(0),
- with_nesting_count_(0),
- language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
- is_generator_(false) {
- *variable = this;
- }
- ~Scope() { *variable_ = prev_; }
- void NextMaterializedLiteralIndex() { materialized_literal_count_++; }
- void AddProperty() { expected_properties_++; }
- ScopeType type() { return type_; }
- int expected_properties() { return expected_properties_; }
- int materialized_literal_count() { return materialized_literal_count_; }
- bool IsInsideWith() { return with_nesting_count_ != 0; }
- bool is_generator() { return is_generator_; }
- void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
- bool is_classic_mode() {
- return language_mode_ == CLASSIC_MODE;
- }
- LanguageMode language_mode() {
- return language_mode_;
- }
- void set_language_mode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
-
- class InsideWith {
- public:
- explicit InsideWith(Scope* scope) : scope_(scope) {
- scope->with_nesting_count_++;
- }
-
- ~InsideWith() { scope_->with_nesting_count_--; }
-
- private:
- Scope* scope_;
- DISALLOW_COPY_AND_ASSIGN(InsideWith);
- };
-
- private:
- Scope** const variable_;
- Scope* const prev_;
- const ScopeType type_;
- int materialized_literal_count_;
- int expected_properties_;
- int with_nesting_count_;
- LanguageMode language_mode_;
- bool is_generator_;
- };
-
- // Report syntax error
- void ReportMessageAt(Scanner::Location location,
- const char* message,
- Vector<const char*> args) {
- ReportMessageAt(location.beg_pos,
- location.end_pos,
- message,
- args.length() > 0 ? args[0] : NULL);
- }
- void ReportMessageAt(Scanner::Location location,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
- }
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* type,
- const char* name_opt) {
- log_->LogMessage(start_pos, end_pos, type, name_opt);
- }
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -595,68 +1144,1015 @@ class PreParser : public ParserBase {
Statement ParseThrowStatement(bool* ok);
Statement ParseTryStatement(bool* ok);
Statement ParseDebuggerStatement(bool* ok);
-
- Expression ParseExpression(bool accept_IN, bool* ok);
- Expression ParseAssignmentExpression(bool accept_IN, bool* ok);
- Expression ParseYieldExpression(bool* ok);
Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
- Expression ParseUnaryExpression(bool* ok);
- Expression ParsePostfixExpression(bool* ok);
- Expression ParseLeftHandSideExpression(bool* ok);
- Expression ParseNewExpression(bool* ok);
- Expression ParseMemberExpression(bool* ok);
- Expression ParseMemberWithNewPrefixesExpression(unsigned new_count, bool* ok);
- Expression ParsePrimaryExpression(bool* ok);
- Expression ParseArrayLiteral(bool* ok);
Expression ParseObjectLiteral(bool* ok);
- Expression ParseRegExpLiteral(bool seen_equal, bool* ok);
Expression ParseV8Intrinsic(bool* ok);
- Arguments ParseArguments(bool* ok);
Expression ParseFunctionLiteral(
Identifier name,
Scanner::Location function_name_location,
bool name_is_strict_reserved,
bool is_generator,
+ int function_token_pos,
+ FunctionLiteral::FunctionType function_type,
bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok);
- Identifier ParseIdentifier(AllowEvalOrArgumentsAsIdentifier, bool* ok);
- Identifier ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* ok);
- Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
-
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
- // Log the currently parsed identifier.
- Identifier GetIdentifierSymbol();
// Log the currently parsed string literal.
Expression GetStringSymbol();
- void set_language_mode(LanguageMode language_mode) {
- scope_->set_language_mode(language_mode);
+ bool CheckInOrOf(bool accept_OF);
+};
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::FunctionState(
+ FunctionState** function_state_stack,
+ typename Traits::Type::Scope** scope_stack,
+ typename Traits::Type::Scope* scope,
+ typename Traits::Type::Zone* extra_param)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ next_handler_index_(0),
+ expected_property_count_(0),
+ is_generator_(false),
+ generator_object_variable_(NULL),
+ function_state_stack_(function_state_stack),
+ outer_function_state_(*function_state_stack),
+ scope_stack_(scope_stack),
+ outer_scope_(*scope_stack),
+ isolate_(NULL),
+ saved_ast_node_id_(0),
+ factory_(extra_param) {
+ *scope_stack_ = scope;
+ *function_state_stack = this;
+ Traits::SetUpFunctionState(this, extra_param);
+}
+
+
+template<class Traits>
+ParserBase<Traits>::FunctionState::~FunctionState() {
+ *scope_stack_ = outer_scope_;
+ *function_state_stack_ = outer_function_state_;
+ Traits::TearDownFunctionState(this);
+}
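The constructor/destructor pair above is a save-and-restore idiom: construction pushes the new state onto the stack pointers, destruction pops it, so nesting follows C++ scope automatically. A small standalone sketch of the same idiom (simplified names, not the V8 class):

#include <cassert>
#include <cstddef>

struct ScopedTop {
  explicit ScopedTop(ScopedTop** stack) : stack_(stack), prev_(*stack) {
    *stack = this;  // push: make this the current top
  }
  ~ScopedTop() { *stack_ = prev_; }  // pop: restore the previous top

  ScopedTop** stack_;
  ScopedTop* prev_;
};

int main() {
  ScopedTop* top = NULL;
  {
    ScopedTop outer(&top);
    assert(top == &outer);
    {
      ScopedTop inner(&top);
      assert(top == &inner);  // the nested state is current while in scope
    }
    assert(top == &outer);    // restored when the inner state is destroyed
  }
  assert(top == NULL);
  return 0;
}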
+
+
+template<class Traits>
+void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+ Scanner::Location source_location = scanner()->location();
+
+ // Several of the tokens are treated specially
+ switch (token) {
+ case Token::EOS:
+ return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::NUMBER:
+ return ReportMessageAt(source_location, "unexpected_token_number");
+ case Token::STRING:
+ return ReportMessageAt(source_location, "unexpected_token_string");
+ case Token::IDENTIFIER:
+ return ReportMessageAt(source_location, "unexpected_token_identifier");
+ case Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location, "unexpected_reserved");
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location, strict_mode() == SLOPPY
+ ? "unexpected_token_identifier" : "unexpected_strict_reserved");
+ default:
+ const char* name = Token::String(token);
+ ASSERT(name != NULL);
+ Traits::ReportMessageAt(
+ source_location, "unexpected_token", Vector<const char*>(&name, 1));
+ }
+}
+
+
+template<class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
+ AllowEvalOrArgumentsAsIdentifier allow_eval_or_arguments,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ IdentifierT name = this->GetSymbol(scanner());
+ if (allow_eval_or_arguments == kDontAllowEvalOrArguments &&
+ strict_mode() == STRICT && this->IsEvalOrArguments(name)) {
+ ReportMessageAt(scanner()->location(), "strict_eval_arguments");
+ *ok = false;
+ }
+ return name;
+ } else if (strict_mode() == SLOPPY &&
+ (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !is_generator()))) {
+ return this->GetSymbol(scanner());
+ } else {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<
+ Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ Token::Value next = Next();
+ if (next == Token::IDENTIFIER) {
+ *is_strict_reserved = false;
+ } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ (next == Token::YIELD && !this->is_generator())) {
+ *is_strict_reserved = true;
+ } else {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierName(bool* ok) {
+ Token::Value next = Next();
+ if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ this->ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
+ return this->GetSymbol(scanner());
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ IdentifierT result = ParseIdentifierName(ok);
+ if (!*ok) return Traits::EmptyIdentifier();
+ scanner()->IsGetOrSet(is_get, is_set);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
+ bool seen_equal, bool* ok) {
+ int pos = peek_position();
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ Next();
+ ReportMessage("unterminated_regexp", Vector<const char*>::empty());
+ *ok = false;
+ return Traits::EmptyExpression();
}
- virtual bool is_classic_mode() {
- return scope_->language_mode() == CLASSIC_MODE;
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ IdentifierT js_pattern = this->NextLiteralString(scanner(), TENURED);
+ if (!scanner()->ScanRegExpFlags()) {
+ Next();
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags");
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ IdentifierT js_flags = this->NextLiteralString(scanner(), TENURED);
+ Next();
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+}
+
+
+#define CHECK_OK ok); \
+ if (!*ok) return this->EmptyExpression(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+// Used in functions where the return type is not ExpressionT.
+#define CHECK_OK_CUSTOM(x) ok); \
+ if (!*ok) return this->x(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
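The unbalanced ')' in CHECK_OK is what makes a call such as ParseExpression(true, CHECK_OK) expand into the call itself, the early return, and a harmless ((void)0). A standalone toy showing the same trick (the -1 error value and the function names are invented for the example):

#include <cstdio>

#define CHECK_OK ok);        \
  if (!*ok) return -1;       \
  ((void)0

int ParseNumber(bool* ok) {
  *ok = false;  // simulate a parse error
  return 42;
}

int ParseSum(bool* ok) {
  // Expands to: int left = ParseNumber(ok); if (!*ok) return -1; ((void)0);
  int left = ParseNumber(CHECK_OK);
  return left + 1;  // not reached when ParseNumber fails
}

int main() {
  bool ok = true;
  printf("%d\n", ParseSum(&ok));  // prints -1 because the error propagated
  return 0;
}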
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
+ // PrimaryExpression ::
+ // 'this'
+ // 'null'
+ // 'true'
+ // 'false'
+ // Identifier
+ // Number
+ // String
+ // ArrayLiteral
+ // ObjectLiteral
+ // RegExpLiteral
+ // '(' Expression ')'
+
+ int pos = peek_position();
+ ExpressionT result = this->EmptyExpression();
+ Token::Value token = peek();
+ switch (token) {
+ case Token::THIS: {
+ Consume(Token::THIS);
+ result = this->ThisExpression(scope_, factory());
+ break;
+ }
+
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER:
+ Next();
+ result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
+ break;
+
+ case Token::IDENTIFIER:
+ case Token::YIELD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
+ // Using eval or arguments in this context is OK even in strict mode.
+ IdentifierT name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+ result = this->ExpressionFromIdentifier(name, pos, scope_, factory());
+ break;
+ }
+
+ case Token::STRING: {
+ Consume(Token::STRING);
+ result = this->ExpressionFromString(pos, scanner(), factory());
+ break;
+ }
+
+ case Token::ASSIGN_DIV:
+ result = this->ParseRegExpLiteral(true, CHECK_OK);
+ break;
+
+ case Token::DIV:
+ result = this->ParseRegExpLiteral(false, CHECK_OK);
+ break;
+
+ case Token::LBRACK:
+ result = this->ParseArrayLiteral(CHECK_OK);
+ break;
+
+ case Token::LBRACE:
+ result = this->ParseObjectLiteral(CHECK_OK);
+ break;
+
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ result = this->ParseExpression(true, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ break;
+
+ case Token::MOD:
+ if (allow_natives_syntax() || extension_ != NULL) {
+ result = this->ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+ // If we're not allowing special syntax we fall through to the
+ // default case.
+
+ default: {
+ Next();
+ ReportUnexpectedToken(token);
+ *ok = false;
+ }
}
- bool is_extended_mode() {
- return scope_->language_mode() == EXTENDED_MODE;
+ return result;
+}
+
+// Precedence = 1
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+ bool accept_IN, bool* ok) {
+ // Expression ::
+ // AssignmentExpression
+ // Expression ',' AssignmentExpression
+
+ ExpressionT result = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
+ }
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
+ bool* ok) {
+ // ArrayLiteral ::
+ // '[' Expression? (',' Expression?)* ']'
+
+ int pos = peek_position();
+ typename Traits::Type::ExpressionList values =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ ExpressionT elem = this->EmptyExpression();
+ if (peek() == Token::COMMA) {
+ elem = this->GetLiteralTheHole(peek_position(), factory());
+ } else {
+ elem = this->ParseAssignmentExpression(true, CHECK_OK);
+ }
+ values->Add(elem, zone_);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
+ }
}
+ Expect(Token::RBRACK, CHECK_OK);
- LanguageMode language_mode() { return scope_->language_mode(); }
+ // Update the scope information before the pre-parsing bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
- bool CheckInOrOf(bool accept_OF);
+ return factory()->NewArrayLiteral(values, literal_index, pos);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
+ bool* ok) {
+ // ObjectLiteral ::
+ // '{' ((
+ // ((IdentifierName | String | Number) ':' AssignmentExpression) |
+ // (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
+ // ) ',')* '}'
+ // (Except that trailing comma is not required and not allowed.)
+
+ int pos = peek_position();
+ typename Traits::Type::PropertyList properties =
+ this->NewPropertyList(4, zone_);
+ int number_of_boilerplate_properties = 0;
+ bool has_function = false;
+
+ ObjectLiteralChecker checker(this, strict_mode());
+
+ Expect(Token::LBRACE, CHECK_OK);
+
+ while (peek() != Token::RBRACE) {
+ if (fni_ != NULL) fni_->Enter();
+
+ typename Traits::Type::Literal key = this->EmptyLiteral();
+ Token::Value next = peek();
+ int next_pos = peek_position();
+
+ switch (next) {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::IDENTIFIER: {
+ bool is_getter = false;
+ bool is_setter = false;
+ IdentifierT id =
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ if (fni_ != NULL) this->PushLiteralName(fni_, id);
+
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !Token::IsKeyword(next)) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ IdentifierT name = this->GetSymbol(scanner_);
+ typename Traits::Type::FunctionLiteral value =
+ this->ParseFunctionLiteral(
+ name, scanner()->location(),
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ CHECK_OK);
+ // Allow any number of parameters for compatibility with JSC.
+ // Specification only allows zero parameters for get and one for set.
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
+ }
+ // Failed to parse as get/set property, so it's just a normal property
+ // (which might be called "get" or "set" or something else).
+ key = factory()->NewLiteral(id, next_pos);
+ break;
+ }
+ case Token::STRING: {
+ Consume(Token::STRING);
+ IdentifierT string = this->GetSymbol(scanner_);
+ if (fni_ != NULL) this->PushLiteralName(fni_, string);
+ uint32_t index;
+ if (this->IsArrayIndex(string, &index)) {
+ key = factory()->NewNumberLiteral(index, next_pos);
+ break;
+ }
+ key = factory()->NewLiteral(string, next_pos);
+ break;
+ }
+ case Token::NUMBER: {
+ Consume(Token::NUMBER);
+ key = this->ExpressionFromLiteral(Token::NUMBER, next_pos, scanner_,
+ factory());
+ break;
+ }
+ default:
+ if (Token::IsKeyword(next)) {
+ Consume(next);
+ IdentifierT string = this->GetSymbol(scanner_);
+ key = factory()->NewLiteral(string, next_pos);
+ } else {
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyLiteral();
+ }
+ }
+
+ // Validate the property
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT value = this->ParseAssignmentExpression(true, CHECK_OK);
+
+ typename Traits::Type::ObjectLiteralProperty property =
+ factory()->NewObjectLiteralProperty(key, value);
+
+ // Mark top-level object literals that contain function literals and
+ // pretenure the literal so it can be added as a constant function
+ // property. (Parser only.)
+ this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, value,
+ &has_function);
+
+ // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
+ if (this->IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+
+ // TODO(1240767): Consider allowing trailing comma.
+ if (peek() != Token::RBRACE) {
+ // Need {} because of the CHECK_OK macro.
+ Expect(Token::COMMA, CHECK_OK);
+ }
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+
+ // Computation of literal_index must happen before pre parse bailout.
+ int literal_index = function_state_->NextMaterializedLiteralIndex();
+
+ return factory()->NewObjectLiteral(properties,
+ literal_index,
+ number_of_boilerplate_properties,
+ has_function,
+ pos);
+}
+
+
+template <class Traits>
+typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
+ bool* ok) {
+ // Arguments ::
+ // '(' (AssignmentExpression)*[','] ')'
+
+ typename Traits::Type::ExpressionList result =
+ this->NewExpressionList(4, zone_);
+ Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ bool done = (peek() == Token::RPAREN);
+ while (!done) {
+ ExpressionT argument = this->ParseAssignmentExpression(
+ true, CHECK_OK_CUSTOM(NullExpressionList));
+ result->Add(argument, zone_);
+ if (result->length() > Code::kMaxArguments) {
+ ReportMessageAt(scanner()->location(), "too_many_arguments");
+ *ok = false;
+ return this->NullExpressionList();
+ }
+ done = (peek() == Token::RPAREN);
+ if (!done) {
+ // Need {} because of the CHECK_OK_CUSTOM macro.
+ Expect(Token::COMMA, CHECK_OK_CUSTOM(NullExpressionList));
+ }
+ }
+ Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ return result;
+}
+
+// Precedence = 2
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
+ // AssignmentExpression ::
+ // ConditionalExpression
+ // YieldExpression
+ // LeftHandSideExpression AssignmentOperator AssignmentExpression
+
+ Scanner::Location lhs_location = scanner()->peek_location();
+
+ if (peek() == Token::YIELD && is_generator()) {
+ return this->ParseYieldExpression(ok);
+ }
+
+ if (fni_ != NULL) fni_->Enter();
+ ExpressionT expression =
+ this->ParseConditionalExpression(accept_IN, CHECK_OK);
+
+ if (!Token::IsAssignmentOp(peek())) {
+ if (fni_ != NULL) fni_->Leave();
+ // Parsed conditional expression only (no assignment).
+ return expression;
+ }
+
+ if (!expression->IsValidLeftHandSide()) {
+ this->ReportMessageAt(lhs_location, "invalid_lhs_in_assignment", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Assignment to eval or arguments is disallowed in strict mode.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ expression = this->MarkExpressionAsLValue(expression);
+
+ Token::Value op = Next(); // Get assignment operator.
+ int pos = position();
+ ExpressionT right = this->ParseAssignmentExpression(accept_IN, CHECK_OK);
+
+ // TODO(1231235): We try to estimate the set of properties set by
+ // constructors. We define a new property whenever there is an
+ // assignment to a property of 'this'. We should probably only add
+ // properties if we haven't seen them before. Otherwise we'll
+ // probably overestimate the number of properties.
+ if (op == Token::ASSIGN && this->IsThisProperty(expression)) {
+ function_state_->AddProperty();
+ }
+
+ this->CheckAssigningFunctionLiteralToProperty(expression, right);
+
+ if (fni_ != NULL) {
+ // Check if the right hand side is a call to avoid inferring a
+ // name if we're dealing with "a = function(){...}();"-like
+ // expression.
+ if ((op == Token::INIT_VAR
+ || op == Token::INIT_CONST_LEGACY
+ || op == Token::ASSIGN)
+ && (!right->IsCall() && !right->IsCallNew())) {
+ fni_->Infer();
+ } else {
+ fni_->RemoveLastFunction();
+ }
+ fni_->Leave();
+ }
+
+ return factory()->NewAssignment(op, expression, right, pos);
+}
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseYieldExpression(bool* ok) {
+ // YieldExpression ::
+ // 'yield' '*'? AssignmentExpression
+ int pos = peek_position();
+ Expect(Token::YIELD, CHECK_OK);
+ Yield::Kind kind =
+ Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
+ ExpressionT generator_object =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ ExpressionT expression =
+ ParseAssignmentExpression(false, CHECK_OK);
+ typename Traits::Type::YieldExpression yield =
+ factory()->NewYield(generator_object, expression, kind, pos);
+ if (kind == Yield::DELEGATING) {
+ yield->set_index(function_state_->NextHandlerIndex());
+ }
+ return yield;
+}
+
+
+// Precedence = 3
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseConditionalExpression(bool accept_IN, bool* ok) {
+ // ConditionalExpression ::
+ // LogicalOrExpression
+ // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+
+ int pos = peek_position();
+ // We start using the binary expression parser for prec >= 4 only!
+ ExpressionT expression = this->ParseBinaryExpression(4, accept_IN, CHECK_OK);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
+ // In parsing the first assignment expression in conditional
+ // expressions we always accept the 'in' keyword; see ECMA-262,
+ // section 11.12, page 58.
+ ExpressionT left = ParseAssignmentExpression(true, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
+ ExpressionT right = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ return factory()->NewConditional(expression, left, right, pos);
+}
+
+
+// Precedence >= 4
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
+ ASSERT(prec >= 4);
+ ExpressionT x = this->ParseUnaryExpression(CHECK_OK);
+ for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
+ // prec1 >= 4
+ while (Precedence(peek(), accept_IN) == prec1) {
+ Token::Value op = Next();
+ int pos = position();
+ ExpressionT y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
+
+ if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
+ factory())) {
+ continue;
+ }
+
+ // For now we distinguish between comparisons and other binary
+ // operations. (We could combine the two and get rid of this
+ // code and AST node eventually.)
+ if (Token::IsCompareOp(op)) {
+ // We have a comparison.
+ Token::Value cmp = op;
+ switch (op) {
+ case Token::NE: cmp = Token::EQ; break;
+ case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
+ default: break;
+ }
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
+ }
+
+ } else {
+ // We have a "normal" binary operation.
+ x = factory()->NewBinaryOperation(op, x, y, pos);
+ }
+ }
+ }
+ return x;
+}
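The prec1 loop above is classic precedence climbing. A standalone toy with two invented operators ('+' at precedence 1, '*' at precedence 2) shows the same loop shape and why 1+2*3 groups as 1+(2*3):

#include <cctype>
#include <cstdio>

static const char* p;  // cursor into the input

int Precedence(char op) {
  if (op == '+') return 1;
  if (op == '*') return 2;
  return 0;
}

int ParsePrimary() {
  int value = 0;
  while (isdigit(static_cast<unsigned char>(*p))) value = value * 10 + (*p++ - '0');
  return value;
}

int ParseBinary(int prec) {
  int x = ParsePrimary();
  for (int prec1 = Precedence(*p); prec1 >= prec; prec1--) {
    while (Precedence(*p) == prec1) {
      char op = *p++;
      int y = ParseBinary(prec1 + 1);  // the right operand binds tighter ops first
      x = (op == '+') ? x + y : x * y;
    }
  }
  return x;
}

int main() {
  p = "1+2*3";
  printf("%d\n", ParseBinary(1));  // prints 7, i.e. 1+(2*3)
  return 0;
}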
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseUnaryExpression(bool* ok) {
+ // UnaryExpression ::
+ // PostfixExpression
+ // 'delete' UnaryExpression
+ // 'void' UnaryExpression
+ // 'typeof' UnaryExpression
+ // '++' UnaryExpression
+ // '--' UnaryExpression
+ // '+' UnaryExpression
+ // '-' UnaryExpression
+ // '~' UnaryExpression
+ // '!' UnaryExpression
+
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
+ op = Next();
+ int pos = position();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+
+ // "delete identifier" is a syntax error in strict mode.
+ if (op == Token::DELETE && strict_mode() == STRICT &&
+ this->IsIdentifier(expression)) {
+ ReportMessage("strict_delete", Vector<const char*>::empty());
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ // Allow Traits to rewrite the expression.
+ return this->BuildUnaryExpression(expression, op, pos, factory());
+ } else if (Token::IsCountOp(op)) {
+ op = Next();
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = ParseUnaryExpression(CHECK_OK);
+ if (!expression->IsValidLeftHandSide()) {
+ ReportMessageAt(lhs_location, "invalid_lhs_in_prefix_op", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Prefix expression operand in strict mode may not be eval or arguments.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ this->MarkExpressionAsLValue(expression);
+
+ return factory()->NewCountOperation(op,
+ true /* prefix */,
+ expression,
+ position());
+
+ } else {
+ return this->ParsePostfixExpression(ok);
+ }
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParsePostfixExpression(bool* ok) {
+ // PostfixExpression ::
+ // LeftHandSideExpression ('++' | '--')?
+
+ Scanner::Location lhs_location = scanner()->peek_location();
+ ExpressionT expression = this->ParseLeftHandSideExpression(CHECK_OK);
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
+ if (!expression->IsValidLeftHandSide()) {
+ ReportMessageAt(lhs_location, "invalid_lhs_in_postfix_op", true);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ if (strict_mode() == STRICT) {
+ // Postfix expression operand in strict mode may not be eval or arguments.
+ this->CheckStrictModeLValue(expression, CHECK_OK);
+ }
+ expression = this->MarkExpressionAsLValue(expression);
+
+ Token::Value next = Next();
+ expression =
+ factory()->NewCountOperation(next,
+ false /* postfix */,
+ expression,
+ position());
+ }
+ return expression;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseLeftHandSideExpression(bool* ok) {
+ // LeftHandSideExpression ::
+ // (NewExpression | MemberExpression) ...
+
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = ParseExpression(true, CHECK_OK);
+ result = factory()->NewProperty(result, index, pos);
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+
+ case Token::LPAREN: {
+ int pos;
+ if (scanner()->current_token() == Token::IDENTIFIER) {
+ // For call of an identifier we want to report position of
+ // the identifier as position of the call in the stack trace.
+ pos = position();
+ } else {
+ // For other kinds of calls we record position of the parenthesis as
+ // position of the call. Note that this is extremely important for
+ // expressions of the form function(){...}() for which call position
+ // should not point to the closing brace; otherwise it will intersect
+ // with positions recorded for the function literal and confuse the debugger.
+ pos = peek_position();
+ // Also the trailing parentheses are a hint that the function will
+ // be called immediately. If we happen to have parsed a preceding
+ // function literal eagerly, we can also compile it eagerly.
+ if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ result->AsFunctionLiteral()->set_parenthesized();
+ }
+ }
+ typename Traits::Type::ExpressionList args = ParseArguments(CHECK_OK);
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations.
+ // The calls that need special treatment are the
+ // direct eval calls. These calls are all of the form eval(...), with
+ // no explicit receiver.
+ // These calls are marked as potentially direct eval calls. Whether
+ // they are actually direct calls to eval is determined at run time.
+ this->CheckPossibleEvalCall(result, scope_);
+ result = factory()->NewCall(result, args, pos);
+ if (fni_ != NULL) fni_->RemoveLastFunction();
+ break;
+ }
+
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) this->PushLiteralName(fni_, name);
+ break;
+ }
+
+ default:
+ return result;
+ }
+ }
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(bool* ok) {
+ // NewExpression ::
+ // ('new')+ MemberExpression
+
+ // The grammar for new expressions is pretty warped. We can have several 'new'
+ // keywords following each other, and then a MemberExpression. When we see '('
+ // after the MemberExpression, it's associated with the rightmost unassociated
+ // 'new' to create a NewExpression with arguments. However, a NewExpression
+ // can also occur without arguments.
+
+ // Examples of new expression:
+ // new foo.bar().baz means (new (foo.bar)()).baz
+ // new foo()() means (new foo())()
+ // new new foo()() means (new (new foo())())
+ // new new foo means new (new foo)
+ // new new foo() means new (new foo())
+ // new new foo().bar().baz means (new (new foo()).bar()).baz
+
+ if (peek() == Token::NEW) {
+ Consume(Token::NEW);
+ int new_pos = position();
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+ if (peek() == Token::LPAREN) {
+ // NewExpression with arguments.
+ typename Traits::Type::ExpressionList args =
+ this->ParseArguments(CHECK_OK);
+ result = factory()->NewCallNew(result, args, new_pos);
+ // The expression can still continue with . or [ after the arguments.
+ result = this->ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+ }
+ // NewExpression without arguments.
+ return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
+ new_pos);
+ }
+ // No 'new' keyword.
+ return this->ParseMemberExpression(ok);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpression(bool* ok) {
+ // MemberExpression ::
+ // (PrimaryExpression | FunctionLiteral)
+ // ('[' Expression ']' | '.' Identifier | Arguments)*
+
+ // The '[' Expression ']' and '.' Identifier parts are parsed by
+ // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
+ // caller.
+
+ // Parse the initial primary or function expression.
+ ExpressionT result = this->EmptyExpression();
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
+ int function_token_position = position();
+ bool is_generator = allow_generators() && Check(Token::MUL);
+ IdentifierT name;
+ bool is_strict_reserved_name = false;
+ Scanner::Location function_name_location = Scanner::Location::invalid();
+ FunctionLiteral::FunctionType function_type =
+ FunctionLiteral::ANONYMOUS_EXPRESSION;
+ if (peek_any_identifier()) {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
+ function_name_location = scanner()->location();
+ function_type = FunctionLiteral::NAMED_EXPRESSION;
+ }
+ result = this->ParseFunctionLiteral(name,
+ function_name_location,
+ is_strict_reserved_name,
+ is_generator,
+ function_token_position,
+ function_type,
+ CHECK_OK);
+ } else {
+ result = ParsePrimaryExpression(CHECK_OK);
+ }
+
+ result = ParseMemberExpressionContinuation(result, CHECK_OK);
+ return result;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
+ bool* ok) {
+ // Parses this part of MemberExpression:
+ // ('[' Expression ']' | '.' Identifier)*
+ while (true) {
+ switch (peek()) {
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
+ int pos = position();
+ ExpressionT index = this->ParseExpression(true, CHECK_OK);
+ expression = factory()->NewProperty(expression, index, pos);
+ if (fni_ != NULL) {
+ this->PushPropertyName(fni_, index);
+ }
+ Expect(Token::RBRACK, CHECK_OK);
+ break;
+ }
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
+ int pos = position();
+ IdentifierT name = ParseIdentifierName(CHECK_OK);
+ expression = factory()->NewProperty(
+ expression, factory()->NewLiteral(name, pos), pos);
+ if (fni_ != NULL) {
+ this->PushLiteralName(fni_, name);
+ }
+ break;
+ }
+ default:
+ return expression;
+ }
+ }
+ ASSERT(false);
+ return this->EmptyExpression();
+}
+
+
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
+
+
+template <typename Traits>
+void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
+ Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = scanner()->FindNumber(&finder_, type);
+ } else {
+ old = scanner()->FindSymbol(&finder_, type);
+ }
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (strict_mode_ == SLOPPY) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
+ }
+ *ok = false;
+ }
+}
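Read as a table, the branches above amount to: duplicate data properties are an error only in strict mode, a data property clashing with an accessor is always an error, and two getters (or two setters) for the same name are always an error, while a getter/setter pair is allowed. A standalone restatement under those assumptions (not the V8 code):

#include <cassert>

enum Kind { kData, kGetter, kSetter };

bool IsError(Kind old_kind, Kind new_kind, bool strict) {
  if (old_kind == kData && new_kind == kData) return strict;  // duplicate data
  if (old_kind == kData || new_kind == kData) return true;    // data vs accessor
  return old_kind == new_kind;                                // get/get or set/set
}

int main() {
  assert(!IsError(kData, kData, false));     // sloppy mode tolerates duplicates
  assert(IsError(kData, kData, true));       // "strict_duplicate_property"
  assert(IsError(kData, kGetter, true));     // "accessor_data_property"
  assert(IsError(kGetter, kGetter, true));   // "accessor_get_set"
  assert(!IsError(kGetter, kSetter, true));  // a getter/setter pair is allowed
  return 0;
}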
- ParserRecorder* log_;
- Scope* scope_;
- bool parenthesized_function_;
-};
} } // v8::internal
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index e363f6776..9aeb8f5c2 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -47,7 +47,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
column_number_(column_number),
shared_id_(0),
- script_id_(v8::Script::kNoScriptId),
+ script_id_(v8::UnboundScript::kNoScriptId),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason) { }
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index db7863f80..50f91ae0b 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -34,33 +34,7 @@
// var $WeakMap = global.WeakMap
-var $Promise = Promise;
-
-
-//-------------------------------------------------------------------
-
-// Core functionality.
-
-// Event queue format: [(value, [(handler, deferred)*])*]
-// I.e., a list of value/tasks pairs, where the value is a resolution value or
-// rejection reason, and the tasks are a respective list of handler/deferred
-// pairs waiting for notification of this value. Each handler is an onResolve or
-// onReject function provided to the same call of 'chain' that produced the
-// associated deferred.
-var promiseEvents = new InternalArray;
-
-// Status values: 0 = pending, +1 = resolved, -1 = rejected
-var promiseStatus = NEW_PRIVATE("Promise#status");
-var promiseValue = NEW_PRIVATE("Promise#value");
-var promiseOnResolve = NEW_PRIVATE("Promise#onResolve");
-var promiseOnReject = NEW_PRIVATE("Promise#onReject");
-var promiseRaw = NEW_PRIVATE("Promise#raw");
-
-function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
-}
-
-function Promise(resolver) {
+var $Promise = function Promise(resolver) {
if (resolver === promiseRaw) return;
if (!%_IsConstructCall()) throw MakeTypeError('not_a_promise', [this]);
if (typeof resolver !== 'function')
@@ -74,6 +48,22 @@ function Promise(resolver) {
}
}
+
+//-------------------------------------------------------------------
+
+// Core functionality.
+
+// Status values: 0 = pending, +1 = resolved, -1 = rejected
+var promiseStatus = GLOBAL_PRIVATE("Promise#status");
+var promiseValue = GLOBAL_PRIVATE("Promise#value");
+var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve");
+var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject");
+var promiseRaw = GLOBAL_PRIVATE("Promise#raw");
+
+function IsPromise(x) {
+ return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
+}
+
function PromiseSet(promise, status, value, onResolve, onReject) {
SET_PRIVATE(promise, promiseStatus, status);
SET_PRIVATE(promise, promiseValue, value);
@@ -102,12 +92,21 @@ function PromiseReject(promise, r) {
}
+// For API.
+
+function PromiseNopResolver() {}
+
+function PromiseCreate() {
+ return new $Promise(PromiseNopResolver)
+}
+
+
// Convenience.
function PromiseDeferred() {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- var promise = PromiseInit(new Promise(promiseRaw));
+ var promise = PromiseInit(new $Promise(promiseRaw));
return {
promise: promise,
resolve: function(x) { PromiseResolve(promise, x) },
@@ -126,7 +125,7 @@ function PromiseDeferred() {
function PromiseResolved(x) {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), +1, x);
+ return PromiseSet(new $Promise(promiseRaw), +1, x);
} else {
return new this(function(resolve, reject) { resolve(x) });
}
@@ -135,7 +134,7 @@ function PromiseResolved(x) {
function PromiseRejected(r) {
if (this === $Promise) {
// Optimized case, avoid extra closure.
- return PromiseSet(new Promise(promiseRaw), -1, r);
+ return PromiseSet(new $Promise(promiseRaw), -1, r);
} else {
return new this(function(resolve, reject) { reject(r) });
}
@@ -169,64 +168,68 @@ function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
}
function PromiseCatch(onReject) {
- return this.chain(UNDEFINED, onReject);
+ return this.then(UNDEFINED, onReject);
}
function PromiseEnqueue(value, tasks) {
- promiseEvents.push(value, tasks);
+ GetMicrotaskQueue().push(function() {
+ for (var i = 0; i < tasks.length; i += 2) {
+ PromiseHandle(value, tasks[i], tasks[i + 1])
+ }
+ });
+
%SetMicrotaskPending(true);
}
-function PromiseMicrotaskRunner() {
- var events = promiseEvents;
- if (events.length > 0) {
- promiseEvents = new InternalArray;
- for (var i = 0; i < events.length; i += 2) {
- var value = events[i];
- var tasks = events[i + 1];
- for (var j = 0; j < tasks.length; j += 2) {
- var handler = tasks[j];
- var deferred = tasks[j + 1];
- try {
- var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError('promise_cyclic', [result]);
- else if (IsPromise(result))
- result.chain(deferred.resolve, deferred.reject);
- else
- deferred.resolve(result);
- } catch(e) {
- // TODO(rossberg): perhaps log uncaught exceptions below.
- try { deferred.reject(e) } catch(e) {}
- }
- }
- }
+function PromiseHandle(value, handler, deferred) {
+ try {
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain);
+ else
+ deferred.resolve(result);
+ } catch(e) {
+ // TODO(rossberg): perhaps log uncaught exceptions below.
+ try { deferred.reject(e) } catch(e) {}
}
}
-RunMicrotasks.runners.push(PromiseMicrotaskRunner);
// Multi-unwrapped chaining with thenable coercion.
function PromiseThen(onResolve, onReject) {
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onResolve =
+ IS_NULL_OR_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onReject =
+ IS_NULL_OR_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
var that = this;
var constructor = this.constructor;
- return this.chain(
+ return %_CallFunction(
+ this,
function(x) {
x = PromiseCoerce(constructor, x);
return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
},
- onReject
+ onReject,
+ PromiseChain
);
}
PromiseCoerce.table = new $WeakMap;
function PromiseCoerce(constructor, x) {
- if (!(IsPromise(x) || IS_NULL_OR_UNDEFINED(x))) {
- var then = x.then;
+ if (!IsPromise(x) && IS_SPEC_OBJECT(x)) {
+ var then;
+ try {
+ then = x.then;
+ } catch(r) {
+ var promise = %_CallFunction(constructor, r, PromiseRejected);
+ PromiseCoerce.table.set(x, promise);
+ return promise;
+ }
if (typeof then === 'function') {
if (PromiseCoerce.table.has(x)) {
return PromiseCoerce.table.get(x);
@@ -235,8 +238,8 @@ function PromiseCoerce(constructor, x) {
PromiseCoerce.table.set(x, deferred.promise);
try {
%_CallFunction(x, deferred.resolve, deferred.reject, then);
- } catch(e) {
- deferred.reject(e);
+ } catch(r) {
+ deferred.reject(r);
}
return deferred.promise;
}
@@ -250,19 +253,23 @@ function PromiseCoerce(constructor, x) {
function PromiseCast(x) {
// TODO(rossberg): cannot do better until we support @@create.
- return IsPromise(x) ? x : this.resolve(x);
+ return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
}
function PromiseAll(values) {
var deferred = %_CallFunction(this, PromiseDeferred);
var resolutions = [];
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
try {
var count = values.length;
if (count === 0) {
deferred.resolve(resolutions);
} else {
for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
+ this.resolve(values[i]).then(
function(i, x) {
resolutions[i] = x;
if (--count === 0) deferred.resolve(resolutions);
@@ -279,9 +286,13 @@ function PromiseAll(values) {
function PromiseOne(values) {
var deferred = %_CallFunction(this, PromiseDeferred);
+ if (!%_IsArray(values)) {
+ deferred.reject(MakeTypeError('invalid_argument'));
+ return deferred.promise;
+ }
try {
for (var i = 0; i < values.length; ++i) {
- this.cast(values[i]).chain(
+ this.resolve(values[i]).then(
function(x) { deferred.resolve(x) },
function(r) { deferred.reject(r) }
);
@@ -295,16 +306,15 @@ function PromiseOne(values) {
//-------------------------------------------------------------------
function SetUpPromise() {
- %CheckIsBootstrapping()
- var global_receiver = %GlobalReceiver(global);
- global_receiver.Promise = $Promise;
+ %CheckIsBootstrapping();
+ %SetProperty(global, 'Promise', $Promise, DONT_ENUM);
InstallFunctions($Promise, DONT_ENUM, [
"defer", PromiseDeferred,
- "resolve", PromiseResolved,
+ "accept", PromiseResolved,
"reject", PromiseRejected,
"all", PromiseAll,
"race", PromiseOne,
- "cast", PromiseCast
+ "resolve", PromiseCast
]);
InstallFunctions($Promise.prototype, DONT_ENUM, [
"chain", PromiseChain,
diff --git a/deps/v8/src/property-details-inl.h b/deps/v8/src/property-details-inl.h
new file mode 100644
index 000000000..98eb1cf58
--- /dev/null
+++ b/deps/v8/src/property-details-inl.h
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_INL_H_
+#define V8_PROPERTY_DETAILS_INL_H_
+
+#include "objects.h"
+#include "property-details.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+inline bool Representation::CanContainDouble(double value) {
+ if (IsDouble() || is_more_general_than(Representation::Double())) {
+ return true;
+ }
+ if (IsInt32Double(value)) {
+ if (IsInteger32()) return true;
+ if (IsSmi()) return Smi::IsValid(static_cast<int32_t>(value));
+ }
+ return false;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_DETAILS_INL_H_
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index b8baff2c2..01050dbd4 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -148,6 +148,8 @@ class Representation {
return other.is_more_general_than(*this) || other.Equals(*this);
}
+ bool CanContainDouble(double value);
+
Representation generalize(Representation other) {
if (other.fits_into(*this)) return *this;
if (other.is_more_general_than(*this)) return other;
@@ -233,11 +235,11 @@ class PropertyDetails BASE_EMBEDDED {
| FieldIndexField::encode(field_index);
}
- int pointer() { return DescriptorPointer::decode(value_); }
+ int pointer() const { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
- PropertyDetails CopyWithRepresentation(Representation representation) {
+ PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
@@ -248,7 +250,7 @@ class PropertyDetails BASE_EMBEDDED {
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
+ inline Smi* AsSmi() const;
static uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
@@ -258,26 +260,26 @@ class PropertyDetails BASE_EMBEDDED {
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
- PropertyType type() { return TypeField::decode(value_); }
+ PropertyType type() const { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
- int dictionary_index() {
+ int dictionary_index() const {
return DictionaryStorageField::decode(value_);
}
- Representation representation() {
+ Representation representation() const {
ASSERT(type() != NORMAL);
return DecodeRepresentation(RepresentationField::decode(value_));
}
- int field_index() {
+ int field_index() const {
return FieldIndexField::decode(value_);
}
- inline PropertyDetails AsDeleted();
+ inline PropertyDetails AsDeleted() const;
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index da772dc86..baa5a0f99 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -187,12 +187,12 @@ class LookupResult BASE_EMBEDDED {
transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
- isolate->SetTopLookupResult(this);
+ isolate->set_top_lookup_result(this);
}
~LookupResult() {
ASSERT(isolate()->top_lookup_result() == this);
- isolate()->SetTopLookupResult(next_);
+ isolate()->set_top_lookup_result(next_);
}
Isolate* isolate() const { return isolate_; }
@@ -200,9 +200,9 @@ class LookupResult BASE_EMBEDDED {
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = details;
number_ = number;
- transition_ = NULL;
}
bool CanHoldValue(Handle<Object> value) {
@@ -246,92 +246,93 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = NOT_FOUND;
details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
+ transition_ = NULL;
}
- JSObject* holder() {
+ JSObject* holder() const {
ASSERT(IsFound());
return JSObject::cast(holder_);
}
- JSProxy* proxy() {
+ JSProxy* proxy() const {
ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
- PropertyType type() {
+ PropertyType type() const {
ASSERT(IsFound());
return details_.type();
}
- Representation representation() {
+ Representation representation() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
- PropertyAttributes GetAttributes() {
+ PropertyAttributes GetAttributes() const {
ASSERT(!IsTransition());
ASSERT(IsFound());
ASSERT(details_.type() != NONEXISTENT);
return details_.attributes();
}
- PropertyDetails GetPropertyDetails() {
+ PropertyDetails GetPropertyDetails() const {
ASSERT(!IsTransition());
return details_;
}
- bool IsFastPropertyType() {
+ bool IsFastPropertyType() const {
ASSERT(IsFound());
return IsTransition() || type() != NORMAL;
}
// Property callbacks do not include transitions to callbacks.
- bool IsPropertyCallbacks() {
+ bool IsPropertyCallbacks() const {
ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
return details_.type() == CALLBACKS;
}
- bool IsReadOnly() {
+ bool IsReadOnly() const {
ASSERT(IsFound());
ASSERT(!IsTransition());
ASSERT(details_.type() != NONEXISTENT);
return details_.IsReadOnly();
}
- bool IsField() {
+ bool IsField() const {
ASSERT(!(details_.type() == FIELD && !IsFound()));
return details_.type() == FIELD;
}
- bool IsNormal() {
+ bool IsNormal() const {
ASSERT(!(details_.type() == NORMAL && !IsFound()));
return details_.type() == NORMAL;
}
- bool IsConstant() {
+ bool IsConstant() const {
ASSERT(!(details_.type() == CONSTANT && !IsFound()));
return details_.type() == CONSTANT;
}
- bool IsConstantFunction() {
+ bool IsConstantFunction() const {
return IsConstant() && GetValue()->IsJSFunction();
}
- bool IsDontDelete() { return details_.IsDontDelete(); }
- bool IsDontEnum() { return details_.IsDontEnum(); }
- bool IsFound() { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() { return lookup_type_ == TRANSITION_TYPE; }
- bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
- bool IsInterceptor() { return lookup_type_ == INTERCEPTOR_TYPE; }
+ bool IsDontDelete() const { return details_.IsDontDelete(); }
+ bool IsDontEnum() const { return details_.IsDontEnum(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
+ bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
// Is the result a property, excluding transitions and the null descriptor?
- bool IsProperty() {
+ bool IsProperty() const {
return IsFound() && !IsTransition();
}
- bool IsDataProperty() {
+ bool IsDataProperty() const {
switch (type()) {
case FIELD:
case NORMAL:
@@ -351,10 +352,10 @@ class LookupResult BASE_EMBEDDED {
return false;
}
- bool IsCacheable() { return cacheable_; }
+ bool IsCacheable() const { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
- Object* GetLazyValue() {
+ Object* GetLazyValue() const {
switch (type()) {
case FIELD:
return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
@@ -379,66 +380,62 @@ class LookupResult BASE_EMBEDDED {
return NULL;
}
- Map* GetTransitionTarget() {
+ Map* GetTransitionTarget() const {
return transition_;
}
- PropertyDetails GetTransitionDetails() {
+ PropertyDetails GetTransitionDetails() const {
+ ASSERT(IsTransition());
return transition_->GetLastDescriptorDetails();
}
- bool IsTransitionToField() {
+ bool IsTransitionToField() const {
return IsTransition() && GetTransitionDetails().type() == FIELD;
}
- bool IsTransitionToConstant() {
+ bool IsTransitionToConstant() const {
return IsTransition() && GetTransitionDetails().type() == CONSTANT;
}
- int GetTransitionIndex() {
- ASSERT(IsTransition());
- return number_;
- }
-
- int GetDescriptorIndex() {
+ int GetDescriptorIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return number_;
}
- PropertyIndex GetFieldIndex() {
+ PropertyIndex GetFieldIndex() const {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
- int GetLocalFieldIndexFromMap(Map* map) {
+ int GetLocalFieldIndexFromMap(Map* map) const {
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
- int GetDictionaryEntry() {
+ int GetDictionaryEntry() const {
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return number_;
}
- JSFunction* GetConstantFunction() {
+ JSFunction* GetConstantFunction() const {
ASSERT(type() == CONSTANT);
return JSFunction::cast(GetValue());
}
- Object* GetConstantFromMap(Map* map) {
+ Object* GetConstantFromMap(Map* map) const {
ASSERT(type() == CONSTANT);
return GetValueFromMap(map);
}
- JSFunction* GetConstantFunctionFromMap(Map* map) {
+ JSFunction* GetConstantFunctionFromMap(Map* map) const {
return JSFunction::cast(GetConstantFromMap(map));
}
- Object* GetConstant() {
+ Object* GetConstant() const {
ASSERT(type() == CONSTANT);
return GetValue();
}
- Object* GetCallbackObject() {
+ Object* GetCallbackObject() const {
ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -447,7 +444,7 @@ class LookupResult BASE_EMBEDDED {
void Print(FILE* out);
#endif
- Object* GetValue() {
+ Object* GetValue() const {
if (lookup_type_ == DESCRIPTOR_TYPE) {
return GetValueFromMap(holder()->map());
}
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index c446b4b49..75e439247 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -38,8 +38,9 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler(assembler->zone()),
assembler_(assembler) {
unsigned int type = assembler->Implementation();
- ASSERT(type < 5);
- const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+ ASSERT(type < 6);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64",
+ "MIPS", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index 1ff8bd979..fc3100867 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -53,6 +53,7 @@ class RegExpMacroAssembler {
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kARM64Implementation,
kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 422da34e7..5142fd33d 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -153,25 +153,13 @@ namespace internal {
PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-// Assert that the given argument has a valid value for a StrictModeFlag
-// and store it in a StrictModeFlag variable with the given name.
+// Assert that the given argument has a valid value for a StrictMode
+// and store it in a StrictMode variable with the given name.
#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \
RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(args.smi_at(index) == kStrictMode || \
- args.smi_at(index) == kNonStrictMode); \
- StrictModeFlag name = \
- static_cast<StrictModeFlag>(args.smi_at(index));
-
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG(name, index) \
- ASSERT(args[index]->IsSmi()); \
- ASSERT(args.smi_at(index) == CLASSIC_MODE || \
- args.smi_at(index) == STRICT_MODE || \
- args.smi_at(index) == EXTENDED_MODE); \
- LanguageMode name = \
- static_cast<LanguageMode>(args.smi_at(index));
+ RUNTIME_ASSERT(args.smi_at(index) == STRICT || \
+ args.smi_at(index) == SLOPPY); \
+ StrictMode name = static_cast<StrictMode>(args.smi_at(index));
static Handle<Map> ComputeObjectLiteralMap(
@@ -298,7 +286,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ boilerplate, element_index, value, SLOPPY);
} else {
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
@@ -309,7 +297,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = JSObject::SetOwnElement(
- boilerplate, element_index, value, kNonStrictMode);
+ boilerplate, element_index, value, SLOPPY);
} else {
// Non-uint32 number.
ASSERT(key->IsNumber());
@@ -480,7 +468,7 @@ static Handle<Object> CreateLiteralBoilerplate(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateObjectLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -582,7 +570,7 @@ static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -595,7 +583,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralStubBailout) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -633,7 +621,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateGlobalPrivateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<JSObject> registry = isolate->GetSymbolRegistry();
+ Handle<String> part = isolate->factory()->private_intern_string();
+ Handle<JSObject> privates =
+ Handle<JSObject>::cast(JSObject::GetProperty(registry, part));
+ Handle<Object> symbol = JSObject::GetProperty(privates, name);
+ if (!symbol->IsSymbol()) {
+ ASSERT(symbol->IsUndefined());
+ symbol = isolate->factory()->NewPrivateSymbol();
+ Handle<Symbol>::cast(symbol)->set_name(*name);
+ JSObject::SetProperty(privates, name, symbol, NONE, STRICT);
+ }
+ return *symbol;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewSymbolWrapper) {
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ return symbol->ToObject(isolate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
@@ -641,6 +655,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolRegistry) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ return *isolate->GetSymbolRegistry();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -890,6 +911,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ if (array_buffer->backing_store() == NULL) {
+ CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+ return isolate->heap()->undefined_value();
+ }
ASSERT(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
@@ -901,11 +926,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) {
void Runtime::ArrayIdToTypeAndSize(
- int arrayId, ExternalArrayType* array_type, size_t* element_size) {
+ int arrayId,
+ ExternalArrayType* array_type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t* element_size) {
switch (arrayId) {
#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
case ARRAY_ID_##TYPE: \
*array_type = kExternal##Type##Array; \
+ *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
+ *fixed_elements_kind = TYPE##_ELEMENTS; \
*element_size = size; \
break;
@@ -923,7 +954,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
@@ -935,18 +966,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
- holder->set_buffer(*buffer);
holder->set_byte_offset(*byte_offset_object);
holder->set_byte_length(*byte_length_object);
size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
size_t byte_length = NumberToSize(isolate, *byte_length_object);
- size_t array_buffer_byte_length =
- NumberToSize(isolate, buffer->byte_length());
- CHECK(byte_offset <= array_buffer_byte_length);
- CHECK(array_buffer_byte_length - byte_offset >= byte_length);
CHECK_EQ(0, static_cast<int>(byte_length % element_size));
size_t length = byte_length / element_size;
@@ -959,14 +992,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
-
- Handle<ExternalArray> elements =
- isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- holder->set_elements(*elements);
+ if (!maybe_buffer->IsNull()) {
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(*maybe_buffer));
+
+ size_t array_buffer_byte_length =
+ NumberToSize(isolate, buffer->byte_length());
+ CHECK(byte_offset <= array_buffer_byte_length);
+ CHECK(array_buffer_byte_length - byte_offset >= byte_length);
+
+ holder->set_buffer(*buffer);
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ Handle<Map> map =
+ JSObject::GetElementsTransitionMap(holder, external_elements_kind);
+ holder->set_map_and_elements(*map, *elements);
+ ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+ } else {
+ holder->set_buffer(Smi::FromInt(0));
+ holder->set_weak_next(isolate->heap()->undefined_value());
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArray(
+ static_cast<int>(length), array_type);
+ holder->set_elements(*elements);
+ }
return isolate->heap()->undefined_value();
}
@@ -992,7 +1045,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ ElementsKind external_elements_kind =
+ EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
+ ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(arrayId,
+ &array_type,
+ &external_elements_kind,
+ &fixed_elements_kind,
+ &element_size);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
if (source->IsJSTypedArray() &&
@@ -1045,7 +1105,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
static_cast<uint8_t*>(buffer->backing_store()));
- holder->set_elements(*elements);
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ holder, external_elements_kind);
+ holder->set_map_and_elements(*map, *elements);
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
@@ -1053,7 +1115,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
if (typed_array->type() == holder->type()) {
uint8_t* backing_store =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(typed_array->buffer())->backing_store());
+ typed_array->GetBuffer()->backing_store());
size_t source_byte_offset =
NumberToSize(isolate, typed_array->byte_offset());
memcpy(
@@ -1082,13 +1144,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
return typed_array->accessor(); \
}
-TYPED_ARRAY_GETTER(Buffer, buffer)
TYPED_ARRAY_GETTER(ByteLength, byte_length)
TYPED_ARRAY_GETTER(ByteOffset, byte_offset)
TYPED_ARRAY_GETTER(Length, length)
#undef TYPED_ARRAY_GETTER
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGetBuffer) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0);
+ if (!holder->IsJSTypedArray())
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_typed_array", HandleVector<Object>(NULL, 0)));
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder));
+ return *typed_array->GetBuffer();
+}
+
+
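
Illustrative only, not part of the diff: with small typed arrays now starting out with on-heap elements and no eager ArrayBuffer, the buffer accessor (backed by Runtime_TypedArrayGetBuffer above) is still expected to look the same from script, assuming the array is below the typed_array_max_size_in_heap threshold.

    var ta = new Uint8Array(8);                      // small, on-heap candidate
    console.log(ta.buffer instanceof ArrayBuffer);   // true, materialized on demand
    console.log(ta.buffer.byteLength);               // 8
    console.log(ta.buffer === ta.buffer);            // true, expected to be cached
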
// Return codes for Runtime_TypedArraySetFastCases.
// Should be synchronized with typedarray.js natives.
enum TypedArraySetResultCodes {
@@ -1134,10 +1207,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
size_t source_offset = NumberToSize(isolate, source->byte_offset());
uint8_t* target_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(target->buffer())->backing_store()) + target_offset;
+ target->GetBuffer()->backing_store()) + target_offset;
uint8_t* source_base =
static_cast<uint8_t*>(
- JSArrayBuffer::cast(source->buffer())->backing_store()) + source_offset;
+ source->GetBuffer()->backing_store()) + source_offset;
// Typed arrays of the same type: use memmove.
if (target->type() == source->type()) {
@@ -1153,8 +1226,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
target_base + target_byte_length > source_base)) {
// We do not support overlapping ArrayBuffers
ASSERT(
- JSArrayBuffer::cast(target->buffer())->backing_store() ==
- JSArrayBuffer::cast(source->buffer())->backing_store());
+ target->GetBuffer()->backing_store() ==
+ source->GetBuffer()->backing_store());
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
} else { // Non-overlapping typed arrays
return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING);
@@ -1162,6 +1235,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayMaxSizeInHeap) {
+ ASSERT_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap);
+ return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_DataViewInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -1656,7 +1735,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
!isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
isolate->factory()->proto_string(),
v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
+ isolate->ReportFailedAccessCheckWrapper(Handle<JSObject>::cast(obj),
+ v8::ACCESS_GET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
@@ -1687,11 +1767,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
!isolate->MayNamedAccessWrapper(obj,
isolate->factory()->proto_string(),
v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_SET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- if (FLAG_harmony_observation && obj->map()->is_observed()) {
+ if (obj->map()->is_observed()) {
Handle<Object> old_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
@@ -1790,7 +1870,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
return ACCESS_ALLOWED;
}
- obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheckWrapper(obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1829,7 +1909,7 @@ static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
break;
}
- isolate->ReportFailedAccessCheck(*obj, access_type);
+ isolate->ReportFailedAccessCheckWrapper(obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1863,7 +1943,7 @@ static Handle<Object> GetOwnProperty(Isolate* isolate,
case ACCESS_ABSENT: return factory->undefined_value();
}
- PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ PropertyAttributes attrs = JSReceiver::GetLocalPropertyAttribute(obj, name);
if (attrs == ABSENT) {
RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
@@ -2053,6 +2133,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
CONVERT_SMI_ARG_CHECKED(attribute, 4);
CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
+ RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
JSObject::DefineAccessor(object,
name,
InstantiateAccessorComponent(isolate, getter),
@@ -2076,7 +2158,7 @@ static Failure* ThrowRedeclarationError(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareGlobals) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
Handle<GlobalObject> global = Handle<GlobalObject>(
@@ -2106,17 +2188,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// value of the variable if the property is already there.
// Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
- if (FLAG_es52_globals) {
- global->LocalLookup(*name, &lookup, true);
- } else {
- global->Lookup(*name, &lookup);
- }
+ global->LocalLookup(*name, &lookup, true);
if (lookup.IsFound()) {
// We found an existing property. Unless it was an interceptor
// that claims the property is absent, skip this declaration.
if (!lookup.IsInterceptor()) continue;
- PropertyAttributes attributes = global->GetPropertyAttribute(*name);
- if (attributes != ABSENT) continue;
+ if (JSReceiver::GetPropertyAttribute(global, name) != ABSENT) continue;
// Fall-through and introduce the absent property by using
// SetProperty.
}
@@ -2145,7 +2222,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
attr |= READ_ONLY;
}
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+ StrictMode strict_mode = DeclareGlobalsStrictMode::decode(flags);
if (!lookup.IsFound() || is_function) {
// If the local property exists, check that we can reconfigure it
@@ -2167,7 +2244,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
RETURN_IF_EMPTY_HANDLE(isolate,
JSObject::SetProperty(
global, name, value, static_cast<PropertyAttributes>(attr),
- language_mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode));
+ strict_mode));
}
}
@@ -2176,7 +2253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
@@ -2223,8 +2300,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(object, name, initial_value, mode,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, initial_value, mode, SLOPPY));
}
}
@@ -2270,7 +2346,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode));
} else {
RETURN_IF_EMPTY_HANDLE(isolate,
- JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, mode, SLOPPY));
}
}
@@ -2291,9 +2367,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
RUNTIME_ASSERT(args[1]->IsSmi());
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1);
// According to ECMA-262, section 12.2, page 62, the property must
// not be deletable.
@@ -2309,15 +2383,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
LookupResult lookup(isolate);
isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
if (lookup.IsInterceptor()) {
+ Handle<JSObject> holder(lookup.holder());
PropertyAttributes intercepted =
- lookup.holder()->GetPropertyAttribute(*name);
+ JSReceiver::GetPropertyAttribute(holder, name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<Object> result = JSObject::SetPropertyForResult(
- handle(lookup.holder()), &lookup, name, value, attributes,
- strict_mode_flag);
+ holder, &lookup, name, value, attributes, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
} else {
@@ -2330,7 +2404,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<GlobalObject> global(isolate->context()->global_object());
Handle<Object> result = JSReceiver::SetProperty(
- global, name, value, attributes, strict_mode_flag);
+ global, name, value, attributes, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2338,7 +2412,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstGlobal) {
SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
@@ -2381,11 +2455,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// BUG 1213575: Handle the case where we have to set a read-only
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
- // Passing non-strict mode because the property is writable.
+ // Passing sloppy mode because the property is writable.
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(global, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, attributes, SLOPPY));
return *value;
}
@@ -2416,7 +2489,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InitializeConstContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -2455,7 +2528,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, NONE, SLOPPY));
return *value;
}
@@ -2506,8 +2579,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- JSReceiver::SetProperty(object, name, value, attributes,
- kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, attributes, SLOPPY));
}
}
@@ -2521,14 +2593,14 @@ RUNTIME_FUNCTION(MaybeObject*,
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
- if (object->HasFastProperties()) {
+ if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
return *object;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpExec) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
@@ -2549,7 +2621,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_RegExpConstructResult) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
@@ -2587,7 +2659,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
HandleScope scope(isolate);
- DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -2668,7 +2739,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
code,
false);
optimized->shared()->DontAdaptArguments();
- JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
+ JSReceiver::SetProperty(holder, key, optimized, NONE, STRICT);
return optimized;
}
@@ -2690,7 +2761,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsSloppyModeFunction) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
@@ -2704,7 +2775,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
}
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- return isolate->heap()->ToBoolean(shared->is_classic_mode());
+ return isolate->heap()->ToBoolean(shared->strict_mode() == SLOPPY);
}
@@ -2724,7 +2795,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
JSFunction* function = JSFunction::cast(callable);
SharedFunctionInfo* shared = function->shared();
- if (shared->native() || !shared->is_classic_mode()) {
+ if (shared->native() || shared->strict_mode() == STRICT) {
return isolate->heap()->undefined_value();
}
// Returns undefined for strict or native functions, or
@@ -2736,7 +2807,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_MaterializeRegExpLiteral) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
@@ -3054,7 +3125,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CreateJSGeneratorObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -3080,7 +3151,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SuspendJSGeneratorObject) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -3131,7 +3202,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResume is
// called in any case, as it needs to reconstruct the stack frame and make space
// for arguments and operands.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ResumeJSGeneratorObject) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
@@ -3150,6 +3221,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
int offset = generator_object->continuation();
ASSERT(offset > 0);
frame->set_pc(pc + offset);
+ if (FLAG_enable_ool_constant_pool) {
+ frame->set_constant_pool(
+ generator_object->function()->code()->constant_pool());
+ }
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
FixedArray* operand_stack = generator_object->operand_stack();
@@ -3175,7 +3250,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowGeneratorStateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
@@ -3208,7 +3283,7 @@ MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCharCodeAt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -3308,8 +3383,7 @@ class FixedArrayBuilder {
}
Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- Factory* factory = target_array->GetIsolate()->factory();
- factory->SetContent(target_array, array_);
+ JSArray::SetContent(target_array, array_);
target_array->set_length(Smi::FromInt(length_));
return target_array;
}
@@ -3404,6 +3478,7 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
+ RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>());
DisallowHeapAllocation no_gc;
uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3414,6 +3489,7 @@ class ReplacementStringBuilder {
} else {
// Non-ASCII.
Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
+ RETURN_IF_EMPTY_HANDLE_VALUE(heap_->isolate(), seq, Handle<String>());
DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -3428,9 +3504,11 @@ class ReplacementStringBuilder {
void IncrementCharacterCount(int by) {
if (character_count_ > String::kMaxLength - by) {
- V8::FatalProcessOutOfMemory("String.replace result too large.");
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ character_count_ = kMaxInt;
+ } else {
+ character_count_ += by;
}
- character_count_ += by;
}
private:
@@ -3911,20 +3989,25 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
static_cast<int64_t>(pattern_len)) *
static_cast<int64_t>(matches) +
static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
- int result_len = static_cast<int>(result_len_64);
+ int result_len;
+ if (result_len_64 > static_cast<int64_t>(String::kMaxLength)) {
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ result_len = kMaxInt; // Provoke exception.
+ } else {
+ result_len = static_cast<int>(result_len_64);
+ }
int subject_pos = 0;
int result_pos = 0;
- Handle<ResultSeqString> result;
+ Handle<String> result_seq;
if (ResultSeqString::kHasAsciiEncoding) {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(result_len));
+ result_seq = isolate->factory()->NewRawOneByteString(result_len);
} else {
- result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawTwoByteString(result_len));
+ result_seq = isolate->factory()->NewRawTwoByteString(result_len);
}
+ RETURN_IF_EMPTY_HANDLE(isolate, result_seq);
+ Handle<ResultSeqString> result = Handle<ResultSeqString>::cast(result_seq);
for (int i = 0; i < matches; i++) {
// Copy non-matched subject content.
@@ -4053,7 +4136,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
capture_count,
global_cache.LastSuccessfulMatch());
- return *(builder.ToString());
+ Handle<String> result = builder.ToString();
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -4102,6 +4187,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
}
+ ASSERT(!answer.is_null());
int prev = 0;
int position = 0;
@@ -4144,11 +4230,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
if (delta == 0) return *answer;
Address end_of_string = answer->address() + string_size;
- isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
- }
-
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ heap->AdjustLiveBytes(answer->address(), -delta, Heap::FROM_MUTATOR);
return *answer;
}
@@ -4201,8 +4285,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
replace,
found,
recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(new_first, second);
if (new_first.is_null()) return new_first;
+ if (*found) return isolate->factory()->NewConsString(new_first, second);
Handle<String> new_second =
StringReplaceOneCharWithString(isolate,
@@ -4211,8 +4295,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
replace,
found,
recursion_limit - 1);
- if (*found) return isolate->factory()->NewConsString(first, new_second);
if (new_second.is_null()) return new_second;
+ if (*found) return isolate->factory()->NewConsString(first, new_second);
return subject;
} else {
@@ -4221,6 +4305,7 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, cons1, Handle<String>());
Handle<String> second =
isolate->factory()->NewSubString(subject, index + 1, subject->length());
return isolate->factory()->NewConsString(cons1, second);
@@ -4246,6 +4331,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
&found,
kRecursionLimit);
if (!result.is_null()) return *result;
+ if (isolate->has_pending_exception()) return Failure::Exception();
return *StringReplaceOneCharWithString(isolate,
FlattenGetString(subject),
search,
@@ -4467,7 +4553,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_SubString) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -4576,7 +4662,7 @@ static MaybeObject* SearchRegExpMultiple(
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(*cached_answer));
// The cache FixedArray is a COW-array and can therefore be reused.
- isolate->factory()->SetContent(result_array, cached_fixed_array);
+ JSArray::SetContent(result_array, cached_fixed_array);
// The actual length of the result array is stored in the last element of
// the backing store (the backing FixedArray may have a larger capacity).
Object* cached_fixed_array_last_element =
@@ -4835,21 +4921,13 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
}
-MaybeObject* Runtime::GetElementOrCharAtOrFail(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- GetElementOrCharAt(isolate, object, index));
-}
-
-
-MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index) {
+Handle<Object> Runtime::GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
// Handle [] indexing on Strings
if (object->IsString()) {
Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
// Handle [] indexing on String objects
@@ -4857,14 +4935,16 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
Handle<JSValue> js_value = Handle<JSValue>::cast(object);
Handle<Object> result =
GetCharAt(Handle<String>(String::cast(js_value->value())), index);
- if (!result->IsUndefined()) return *result;
+ if (!result->IsUndefined()) return result;
}
+ Handle<Object> result;
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(isolate, index);
+ Handle<Object> proto(object->GetPrototype(isolate), isolate);
+ return Object::GetElement(isolate, proto, index);
+ } else {
+ return Object::GetElement(isolate, object, index);
}
-
- return object->GetElement(isolate, index);
}
@@ -4923,7 +5003,9 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ Handle<Object> result = GetElementOrCharAt(isolate, object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -4933,7 +5015,9 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
// Check if the name is trivially convertible to an index and get
// the element if so.
if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ Handle<Object> result = GetElementOrCharAt(isolate, object, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
return object->GetProperty(*name);
}
@@ -4993,8 +5077,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
int offset = result.GetFieldIndex().field_index();
// Do not track double fields in the keyed lookup cache. Reading
// double values requires boxing.
- if (!FLAG_track_double_fields ||
- !result.representation().IsDouble()) {
+ if (!result.representation().IsDouble()) {
keyed_lookup_cache->Update(receiver_map, key, offset);
}
return receiver->FastPropertyAt(result.representation(), offset);
@@ -5129,7 +5212,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
name,
obj_value,
handle(lookup.holder()),
- kStrictMode);
+ STRICT);
RETURN_IF_EMPTY_HANDLE(isolate, result_object);
return *result_object;
}
@@ -5202,7 +5285,7 @@ Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
if (object->IsUndefined() || object->IsNull()) {
@@ -5320,7 +5403,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
return value;
}
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
}
@@ -5328,7 +5411,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
} else {
@@ -5346,7 +5429,7 @@ Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ return JSObject::SetElement(js_object, index, value, attr, SLOPPY,
false,
DEFINE_PROPERTY);
} else {
@@ -5399,6 +5482,17 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenProperty) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ return *JSObject::SetHiddenProperty(object, key, value);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
@@ -5413,10 +5507,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(unchecked_attributes);
- StrictModeFlag strict_mode = kNonStrictMode;
+ StrictMode strict_mode = SLOPPY;
if (args.length() == 5) {
- CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
- strict_mode = strict_mode_flag;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 4);
+ strict_mode = strict_mode_arg;
}
Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
@@ -5595,7 +5689,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
- JSReceiver::DeleteMode delete_mode = (strict_mode == kStrictMode)
+ JSReceiver::DeleteMode delete_mode = strict_mode == STRICT
? JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION;
Handle<Object> result = JSReceiver::DeleteProperty(object, key, delete_mode);
RETURN_IF_EMPTY_HANDLE(isolate, result);
@@ -5693,13 +5787,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- PropertyAttributes att = object->GetLocalPropertyAttribute(key);
+ PropertyAttributes att = JSReceiver::GetLocalPropertyAttribute(object, key);
if (att == ABSENT || (att & DONT_ENUM) != 0) {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
@@ -5780,10 +5874,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*obj,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(obj,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(obj, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5800,10 +5894,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
for (int i = 0; i < length; i++) {
// Only collect names if access is permitted.
if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*jsproto,
- isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*jsproto, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(jsproto,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(jsproto, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -5847,7 +5941,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
}
}
next_copy_index += local_property_count[i];
- if (jsproto->HasHiddenProperties()) {
+
+ // Hidden properties only show up if the filter does not skip strings.
+ if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
hidden_strings++;
}
if (i < length - 1) {
@@ -5951,9 +6047,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ !isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ isolate->ReportFailedAccessCheckWrapper(object, v8::ACCESS_KEYS);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -6029,7 +6126,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(isolate, index);
+ Handle<Object> initial_prototype(isolate->initial_object_prototype());
+ Handle<Object> result =
+ Object::GetElement(isolate, initial_prototype, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -6037,7 +6138,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
if (key->Equals(isolate->heap()->callee_string())) {
JSFunction* function = frame->function();
- if (!function->shared()->is_classic_mode()) {
+ if (function->shared()->strict_mode() == STRICT) {
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_arguments_callee", HandleVector<Object>(NULL, 0)));
}
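
Illustrative only, not part of the diff: the strict-mode callee check above is observable from script.

    function sloppyCallee() { return arguments.callee; }
    function strictCallee() { 'use strict'; return arguments.callee; }

    console.log(sloppyCallee() === sloppyCallee);    // true
    try {
      strictCallee();
    } catch (e) {
      console.log(e instanceof TypeError);           // true, callee is poisoned in strict mode
    }
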
@@ -6225,7 +6326,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
Handle<String> result = string->IsOneByteRepresentationUnderneath()
? URIEscape::Escape<uint8_t>(isolate, source)
: URIEscape::Escape<uc16>(isolate, source);
- if (result.is_null()) return Failure::OutOfMemoryException(0x12);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -6285,49 +6386,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
}
+static inline bool ToUpperOverflows(uc32 character) {
+ // y with umlaut and the micro sign are the only characters that stop
+ // fitting into one byte when converting to uppercase.
+ static const uc32 yuml_code = 0xff;
+ static const uc32 micro_code = 0xb5;
+ return (character == yuml_code || character == micro_code);
+}
+
+
template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
Isolate* isolate,
- String* s,
- String::Encoding result_encoding,
- int length,
- int input_string_length,
+ String* string,
+ SeqString* result,
+ int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
+ DisallowHeapAllocation no_gc;
// We try this twice, once with the assumption that the result is no longer
// than the input and, if that assumption breaks, again with the exact
// length. This may not be pretty, but it is nicer than what was here before
// and I hereby claim my vaffel-is.
//
- // Allocate the resulting string.
- //
// NOTE: This assumes that the upper/lower case of an ASCII
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- Object* o;
- { MaybeObject* maybe_o = result_encoding == String::ONE_BYTE_ENCODING
- ? isolate->heap()->AllocateRawOneByteString(length)
- : isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* result = String::cast(o);
bool has_changed_character = false;
- DisallowHeapAllocation no_gc;
-
// Convert all characters to upper case, assuming that they will fit
// in the buffer
Access<ConsStringIteratorOp> op(
isolate->runtime_state()->string_iterator());
- StringCharacterStream stream(s, op.value());
+ StringCharacterStream stream(string, op.value());
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
uc32 current = stream.GetNext();
- // y with umlauts is the only character that stops fitting into one-byte
- // when converting to uppercase.
- static const uc32 yuml_code = 0xff;
- bool ignore_yuml = result->IsSeqTwoByteString() || Converter::kIsToLower;
- for (int i = 0; i < length;) {
+ bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ for (int i = 0; i < result_length;) {
bool has_next = stream.HasMore();
uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
@@ -6335,14 +6431,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// The case conversion of this character is the character itself.
result->Set(i, current);
i++;
- } else if (char_length == 1 && (ignore_yuml || current != yuml_code)) {
+ } else if (char_length == 1 &&
+ (ignore_overflow || !ToUpperOverflows(current))) {
// Common case: converting the letter resulted in one character.
ASSERT(static_cast<uc32>(chars[0]) != current);
result->Set(i, chars[0]);
has_changed_character = true;
i++;
- } else if (length == input_string_length) {
- bool found_yuml = (current == yuml_code);
+ } else if (result_length == string->length()) {
+ bool overflows = ToUpperOverflows(current);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several
// characters. No matter, we calculate the exact length
@@ -6362,7 +6459,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int current_length = i + char_length + next_length;
while (stream.HasMore()) {
current = stream.GetNext();
- found_yuml |= (current == yuml_code);
+ overflows |= ToUpperOverflows(current);
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
// it does not in any case affect the length of what it convert
@@ -6370,15 +6467,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int char_length = mapping->get(current, 0, chars);
if (char_length == 0) char_length = 1;
current_length += char_length;
- if (current_length > Smi::kMaxValue) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x13);
+ if (current_length > String::kMaxLength) {
+ AllowHeapAllocation allocate_error_and_return;
+ return isolate->ThrowInvalidStringLength();
}
}
// Try again with the real length. Return signed if we need
- // to allocate a two-byte string for y-umlaut to uppercase.
- return (found_yuml && !ignore_yuml) ? Smi::FromInt(-current_length)
- : Smi::FromInt(current_length);
+ // to allocate a two-byte string for the uppercased result.
+ return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
result->Set(i, chars[j]);
@@ -6395,7 +6492,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// we simple return the result and let the converted string
// become garbage; there is no reason to keep two identical strings
// alive.
- return s;
+ return string;
}
}
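The two-pass strategy in ConvertCaseHelper above (assume the result is as long as the input, and only recompute with the exact length when a character such as y-umlaut or the micro sign expands) can be sketched outside V8 roughly as follows. The expanding mapping below is a toy stand-in for the unibrow tables, not the real conversion:

#include <cstdio>
#include <string>

// Toy mapping: upper-cases ASCII and pretends 's' expands to "SS",
// standing in for characters whose uppercase form is wider than one unit.
static int ToUpperMapping(char c, char* out) {
  if (c == 's') { out[0] = 'S'; out[1] = 'S'; return 2; }
  out[0] = (c >= 'a' && c <= 'z') ? static_cast<char>(c - 32) : c;
  return 1;
}

// Convert |in| into the pre-sized |out|.  Returns true on success; if the
// buffer turns out to be too small, reports the exact length and bails out.
static bool ConvertInto(const std::string& in, std::string* out, size_t* exact) {
  char buf[2];
  size_t pos = 0;
  for (size_t i = 0; i < in.size(); ++i) {
    int n = ToUpperMapping(in[i], buf);
    if (pos + n > out->size()) {
      size_t needed = pos;
      for (size_t j = i; j < in.size(); ++j) needed += ToUpperMapping(in[j], buf);
      *exact = needed;
      return false;
    }
    for (int k = 0; k < n; ++k) (*out)[pos++] = buf[k];
  }
  out->resize(pos);
  return true;
}

static std::string ConvertCase(const std::string& in) {
  // First pass: assume the result is no longer than the input.
  std::string result(in.size(), '\0');
  size_t exact = 0;
  if (ConvertInto(in, &result, &exact)) return result;
  // Second pass: the exact length is known, so this cannot fail.
  result.assign(exact, '\0');
  ConvertInto(in, &result, &exact);
  return result;
}

int main() {
  std::printf("%s\n", ConvertCase("gauss").c_str());  // prints GAUSSSS
}

The real helper does not resize in place; it hands the exact length back to ConvertCase as a Smi, negated when the overflow forces a two-byte result, and the caller reallocates and runs the conversion again.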
@@ -6426,7 +6523,7 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
#ifdef DEBUG
static bool CheckFastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool changed,
bool is_to_lower) {
@@ -6449,12 +6546,12 @@ static bool CheckFastAsciiConvert(char* dst,
template<class Converter>
static bool FastAsciiConvert(char* dst,
- char* src,
+ const char* src,
int length,
bool* changed_out) {
#ifdef DEBUG
char* saved_dst = dst;
- char* saved_src = src;
+ const char* saved_src = src;
#endif
DisallowHeapAllocation no_gc;
// We rely on the distance between upper and lower case letters
@@ -6465,12 +6562,12 @@ static bool FastAsciiConvert(char* dst,
static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
bool changed = false;
uintptr_t or_acc = 0;
- char* const limit = src + length;
+ const char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
// Process the prefix of the input that requires no conversion one
// (machine) word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
@@ -6483,7 +6580,7 @@ static bool FastAsciiConvert(char* dst,
// Process the remainder of the input performing conversion when
// required one word at a time.
while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
or_acc |= w;
uintptr_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
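FastAsciiConvert above works a whole machine word per iteration: AsciiRangeMask marks every byte that falls in the convertible range, and XOR-ing that mask shifted down to the 0x20 bit flips the case of exactly those bytes. A minimal standalone sketch of the word-at-a-time trick, assuming every byte is ASCII (the or_acc accumulator in the real code is what validates that assumption before the fast result is kept); the mask formula here is one common way to build such a mask and may differ in detail from V8's:

#include <cstdint>
#include <cstdio>
#include <cstring>

static const uintptr_t kOneInEveryByte = ~static_cast<uintptr_t>(0) / 0xFF;

// Returns a word with the high bit set in every byte of |w| whose value lies
// in [lo, hi).  Only valid while every byte of |w| is below 0x80 (ASCII).
static uintptr_t AsciiRangeMask(uintptr_t w, char lo, char hi) {
  uintptr_t below_hi = kOneInEveryByte * (0x7F + hi) - w;  // high bit: byte < hi
  uintptr_t below_lo = kOneInEveryByte * (0x7F + lo) - w;  // high bit: byte < lo
  return below_hi & ~below_lo & (kOneInEveryByte * 0x80);
}

int main() {
  const char src[] = "Hello, World!!!!";  // length is a multiple of the word size, for brevity
  char dst[sizeof src];
  for (size_t i = 0; i < sizeof src - 1; i += sizeof(uintptr_t)) {
    uintptr_t w;
    std::memcpy(&w, src + i, sizeof w);
    uintptr_t m = AsciiRangeMask(w, 'A', 'Z' + 1);
    w ^= (m >> 2);  // 0x80 >> 2 == 0x20: toggle the case bit where marked
    std::memcpy(dst + i, &w, sizeof w);
  }
  dst[sizeof src - 1] = '\0';
  std::printf("%s\n", dst);  // prints "hello, world!!!!"
}

If or_acc ever shows a byte with the high bit set, the fast result is discarded and the generic two-byte path runs instead, as the comment in ConvertCase notes.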
@@ -6526,13 +6623,12 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
- SealHandleScope shs(isolate);
- CONVERT_ARG_CHECKED(String, s, 0);
- s = s->TryFlattenGetString();
-
- const int length = s->length();
+ HandleScope handle_scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ s = FlattenGetString(s);
+ int length = s->length();
// Assume that the string is not empty; we need this assumption later
- if (length == 0) return s;
+ if (length == 0) return *s;
// Simpler handling of ASCII strings.
//
@@ -6540,42 +6636,46 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqOneByteString()) {
- Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- SeqOneByteString* result = SeqOneByteString::cast(o);
+ if (s->IsOneByteRepresentationUnderneath()) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length);
+ ASSERT(!result.is_null()); // Same length as input.
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat_content = s->GetFlatContent();
+ ASSERT(flat_content.IsFlat());
bool has_changed_character = false;
bool is_ascii = FastAsciiConvert<Converter>(
reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
length,
&has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
- if (is_ascii) {
- return has_changed_character ? result : s;
- }
+ if (is_ascii) return has_changed_character ? *result : *s;
}
- String::Encoding result_encoding = s->IsOneByteRepresentation()
- ? String::ONE_BYTE_ENCODING : String::TWO_BYTE_ENCODING;
- Object* answer;
- { MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
+ Handle<SeqString> result;
+ if (s->IsOneByteRepresentation()) {
+ result = isolate->factory()->NewRawOneByteString(length);
+ } else {
+ result = isolate->factory()->NewRawTwoByteString(length);
}
- if (answer->IsSmi()) {
- int new_length = Smi::cast(answer)->value();
- if (new_length < 0) {
- result_encoding = String::TWO_BYTE_ENCODING;
- new_length = -new_length;
- }
- MaybeObject* maybe_answer = ConvertCaseHelper(
- isolate, s, result_encoding, new_length, length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
+ ASSERT(!result.is_null()); // Same length as input.
+
+ MaybeObject* maybe = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+ Object* answer;
+ if (!maybe->ToObject(&answer)) return maybe;
+ if (answer->IsString()) return answer;
+
+ ASSERT(answer->IsSmi());
+ length = Smi::cast(answer)->value();
+ if (s->IsOneByteRepresentation() && length > 0) {
+ result = isolate->factory()->NewRawOneByteString(length);
+ } else {
+ if (length < 0) length = -length;
+ result = isolate->factory()->NewRawTwoByteString(length);
}
- return answer;
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return ConvertCaseHelper(isolate, *s, *result, length, mapping);
}
@@ -6591,11 +6691,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
}
-static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
- return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -6608,15 +6703,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
int length = string->length();
int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
if (trimLeft) {
- while (left < length && IsTrimWhiteSpace(string->Get(left))) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
left++;
}
}
int right = length;
if (trimRight) {
- while (right > left && IsTrimWhiteSpace(string->Get(right - 1))) {
+ while (right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(
+ string->Get(right - 1))) {
right--;
}
}
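The trim loops above simply advance left and retreat right while a character-class predicate holds; the behavioural change is only which predicate is used (the unicode cache's IsWhiteSpaceOrLineTerminator instead of the removed IsTrimWhiteSpace). A rough ASCII-only equivalent, with std::isspace standing in for the Unicode predicate:

#include <cctype>
#include <cstdio>
#include <string>

// Stand-in predicate; the runtime consults the unicode cache's
// IsWhiteSpaceOrLineTerminator over the full Unicode range instead.
static bool IsTrimmable(char c) {
  return std::isspace(static_cast<unsigned char>(c)) != 0;
}

static std::string Trim(const std::string& s, bool trim_left, bool trim_right) {
  int left = 0;
  int length = static_cast<int>(s.size());
  if (trim_left) {
    while (left < length && IsTrimmable(s[left])) left++;
  }
  int right = length;
  if (trim_right) {
    while (right > left && IsTrimmable(s[right - 1])) right--;
  }
  return s.substr(left, right - left);
}

int main() {
  std::printf("[%s]\n", Trim("  hello \n", true, true).c_str());  // prints [hello]
}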
@@ -6818,7 +6917,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToString) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6829,7 +6928,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToStringSkipCache) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6854,24 +6953,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
}
-// ES6 draft 9.1.11
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
-
- // We do not include 0 so that we don't have to treat +0 / -0 cases.
- if (number > 0 && number <= Smi::kMaxValue) {
- return Smi::FromInt(static_cast<int>(number));
- }
- if (number <= 0) {
- return Smi::FromInt(0);
- }
- return isolate->heap()->NumberFromDouble(DoubleToInteger(number));
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6916,7 +6997,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NumberToSmi) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -6935,7 +7016,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateHeapNumber) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
@@ -7022,13 +7103,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
isolate->counters()->string_add_runtime()->Increment();
- return *isolate->factory()->NewConsString(str1, str2);
+ Handle<String> result = isolate->factory()->NewConsString(str1, str2);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -7075,10 +7158,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x14);
- }
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
int array_length = args.smi_at(1);
CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
@@ -7152,8 +7232,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
if (increment > String::kMaxLength - position) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x15);
+ return isolate->ThrowInvalidStringLength();
}
position += increment;
}
@@ -7188,20 +7267,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- if (!args[1]->IsSmi()) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x16);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, separator, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
+ RUNTIME_ASSERT(array->HasFastObjectElements());
- if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
- FixedArray* fixed_array = FixedArray::cast(array->elements());
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
if (fixed_array->length() < array_length) {
array_length = fixed_array->length();
}
@@ -7210,38 +7284,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
- if (first->IsString()) return first;
+ RUNTIME_ASSERT(first->IsString());
+ return first;
}
int separator_length = separator->length();
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x17);
+ return isolate->ThrowInvalidStringLength();
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
- if (!element_obj->IsString()) {
- // TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
+ RUNTIME_ASSERT(element_obj->IsString());
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException(0x18);
+ STATIC_ASSERT(String::kMaxLength < kMaxInt);
+ length = kMaxInt; // Provoke exception.
+ break;
}
length += increment;
}
- Object* object;
- { MaybeObject* maybe_object =
- isolate->heap()->AllocateRawTwoByteString(length);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+ Handle<SeqTwoByteString> answer =
+ isolate->factory()->NewRawTwoByteString(length);
+ RETURN_IF_EMPTY_HANDLE(isolate, answer);
+
+ DisallowHeapAllocation no_gc;
uc16* sink = answer->GetChars();
#ifdef DEBUG
@@ -7249,13 +7320,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
#endif
String* first = String::cast(fixed_array->get(0));
+ String* separator_raw = *separator;
int first_length = first->length();
String::WriteToFlat(first, sink, 0, first_length);
sink += first_length;
for (int i = 1; i < array_length; i++) {
ASSERT(sink + separator_length <= end);
- String::WriteToFlat(separator, sink, 0, separator_length);
+ String::WriteToFlat(separator_raw, sink, 0, separator_length);
sink += separator_length;
String* element = String::cast(fixed_array->get(i));
@@ -7268,7 +7340,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
// Use %_FastAsciiArrayJoin instead.
ASSERT(!answer->IsOneByteRepresentation());
- return answer;
+ return *answer;
}
template <typename Char>
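The length bookkeeping in StringBuilderJoin above is arranged so the running total can reach the String::kMaxLength check before any int overflow: the ceiling division bounds the separator contribution up front, and each element is tested against the remaining headroom before being added. A standalone sketch of the same guard, using an illustrative limit in place of String::kMaxLength:

#include <cstdio>

// Illustrative stand-in for String::kMaxLength; the real constant differs.
static const int kMaxLength = (1 << 28) - 16;

// Returns true and the total length if joining |n| elements with a separator
// of length |sep_len| stays under kMaxLength, computed without int overflow.
// Assumes 0 <= sep_len <= kMaxLength and non-negative element lengths.
static bool JoinedLengthFits(const int* element_lengths, int n, int sep_len, int* total) {
  if (n == 0) { *total = 0; return true; }
  if (sep_len > 0) {
    // Largest number of separators whose combined length can still fit.
    int max_separators = (kMaxLength + sep_len - 1) / sep_len;
    if (max_separators < n - 1) return false;
  }
  int length = (n - 1) * sep_len;
  for (int i = 0; i < n; i++) {
    int increment = element_lengths[i];
    if (increment > kMaxLength - length) return false;  // limit exceeded
    length += increment;
  }
  *total = length;
  return true;
}

int main() {
  int lens[] = {3, 5, 7};
  int total = 0;
  if (JoinedLengthFits(lens, 3, 2, &total)) std::printf("joined length: %d\n", total);
}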
@@ -7327,12 +7399,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Find total length of join result.
int string_length = 0;
bool is_ascii = separator->IsOneByteRepresentation();
- int max_string_length;
- if (is_ascii) {
- max_string_length = SeqOneByteString::kMaxLength;
- } else {
- max_string_length = SeqTwoByteString::kMaxLength;
- }
bool overflow = false;
CONVERT_NUMBER_CHECKED(int, elements_length,
Int32, elements_array->length());
@@ -7345,10 +7411,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
int length = string->length();
if (is_ascii && !string->IsOneByteRepresentation()) {
is_ascii = false;
- max_string_length = SeqTwoByteString::kMaxLength;
}
- if (length > max_string_length ||
- max_string_length - length < string_length) {
+ if (length > String::kMaxLength ||
+ String::kMaxLength - length < string_length) {
overflow = true;
break;
}
@@ -7358,7 +7423,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
if (!overflow && separator_length > 0) {
if (array_length <= 0x7fffffffu) {
int separator_count = static_cast<int>(array_length) - 1;
- int remaining_length = max_string_length - string_length;
+ int remaining_length = String::kMaxLength - string_length;
if ((remaining_length / separator_length) >= separator_count) {
string_length += separator_length * (array_length - 1);
} else {
@@ -7376,9 +7441,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Throw an exception if the resulting string is too large. See
// https://code.google.com/p/chromium/issues/detail?id=336820
// for details.
- return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_string_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->ThrowInvalidStringLength();
}
if (is_ascii) {
@@ -7663,7 +7726,7 @@ static Object* FlatStringCompare(String* x, String* y) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StringCompare) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -7698,33 +7761,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
+#define RUNTIME_UNARY_MATH(NAME) \
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_##NAME) { \
+ SealHandleScope shs(isolate); \
+ ASSERT(args.length() == 1); \
+ isolate->counters()->math_##NAME()->Increment(); \
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
+ return isolate->heap()->AllocateHeapNumber(std::NAME(x)); \
+}
+
+RUNTIME_UNARY_MATH(acos)
+RUNTIME_UNARY_MATH(asin)
+RUNTIME_UNARY_MATH(atan)
+RUNTIME_UNARY_MATH(log)
+#undef RUNTIME_UNARY_MATH
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleHi) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_acos()->Increment();
-
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::acos(x));
+ uint64_t integer = double_to_uint64(x);
+ integer = (integer >> 32) & 0xFFFFFFFFu;
+ return isolate->heap()->NumberFromDouble(static_cast<int32_t>(integer));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DoubleLo) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- isolate->counters()->math_asin()->Increment();
-
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::asin(x));
+ return isolate->heap()->NumberFromDouble(
+ static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ConstructDouble) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_atan()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::atan(x));
+ ASSERT(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+ uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+ return isolate->heap()->AllocateHeapNumber(uint64_to_double(result));
}
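Runtime_DoubleHi, Runtime_DoubleLo and Runtime_ConstructDouble just expose the two 32-bit halves of a double's IEEE-754 bit pattern and reassemble them. The round trip in plain C++, with memcpy standing in for V8's double_to_uint64 and uint64_to_double helpers:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a double's bits as a 64-bit integer and back.
static uint64_t DoubleToUint64(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

static double Uint64ToDouble(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  double x = -123.456;
  uint64_t bits = DoubleToUint64(x);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);          // DoubleHi: sign, exponent, top of mantissa
  uint32_t lo = static_cast<uint32_t>(bits & 0xFFFFFFFFu);  // DoubleLo: low 32 mantissa bits
  double rebuilt = Uint64ToDouble((static_cast<uint64_t>(hi) << 32) | lo);  // ConstructDouble
  std::printf("hi=0x%08x lo=0x%08x roundtrip=%.17g\n", hi, lo, rebuilt);
}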
@@ -7775,16 +7853,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_log()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->AllocateHeapNumber(std::log(x));
-}
-
-
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
@@ -7880,6 +7948,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_fround) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ float xf = static_cast<float>(x);
+ return isolate->heap()->AllocateHeapNumber(xf);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
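Runtime_Math_fround above rounds its argument to single precision by narrowing to float and widening back to double, which is all ES6 Math.fround requires. In isolation:

#include <cstdio>

// Math.fround: round a double to the nearest single-precision value.
static double Fround(double x) {
  return static_cast<double>(static_cast<float>(x));
}

int main() {
  std::printf("%.17g\n", Fround(5.5));   // 5.5 is exactly representable as a float
  std::printf("%.17g\n", Fround(5.05));  // rounds to the nearest float value
}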
@@ -7928,7 +8006,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewArgumentsFast) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -7946,11 +8024,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
Handle<FixedArray> parameter_map =
isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
parameter_map->set_map(
- isolate->heap()->non_strict_arguments_elements_map());
+ isolate->heap()->sloppy_arguments_elements_map());
Handle<Map> old_map(result->map());
Handle<Map> new_map = isolate->factory()->CopyMap(old_map);
- new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
+ new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
result->set_map(*new_map);
result->set_elements(*parameter_map);
@@ -8023,7 +8101,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewStrictArgumentsFast) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
@@ -8056,7 +8134,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosureFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
@@ -8070,7 +8148,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
@@ -8261,12 +8339,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
-
- Handle<Object> constructor = args.at<Object>(0);
-
+static MaybeObject* Runtime_NewObjectHelper(Isolate* isolate,
+ Handle<Object> constructor,
+ Handle<AllocationSite> site) {
// If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
@@ -8324,7 +8399,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
shared->CompleteInobjectSlackTracking();
}
- Handle<JSObject> result = isolate->factory()->NewJSObject(function);
+ Handle<JSObject> result;
+ if (site.is_null()) {
+ result = isolate->factory()->NewJSObject(function);
+ } else {
+ result = isolate->factory()->NewJSObjectWithMemento(function, site);
+ }
RETURN_IF_EMPTY_HANDLE(isolate, result);
isolate->counters()->constructed_objects()->Increment();
@@ -8334,7 +8414,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObject) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+
+ Handle<Object> constructor = args.at<Object>(0);
+ return Runtime_NewObjectHelper(isolate,
+ constructor,
+ Handle<AllocationSite>::null());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewObjectWithAllocationSite) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ Handle<Object> constructor = args.at<Object>(1);
+ Handle<Object> feedback = args.at<Object>(0);
+ Handle<AllocationSite> site;
+ if (feedback->IsAllocationSite()) {
+ // The feedback can be an AllocationSite or undefined.
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ return Runtime_NewObjectHelper(isolate,
+ constructor,
+ site);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_FinalizeInstanceSize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8345,7 +8453,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileUnoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8374,7 +8482,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileOptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_CompileOptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
Handle<JSFunction> function = args.at<JSFunction>(0);
@@ -8437,7 +8545,7 @@ class ActivationsFinder : public ThreadVisitor {
};
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
@@ -8447,7 +8555,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsSmi());
@@ -8490,6 +8598,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
PrintF("]\n");
}
function->ReplaceCode(function->shared()->code());
+ // Evict optimized code for this function from the cache so that it
+ // doesn't get used for new closures.
+ function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
+ "notify deoptimized");
}
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
@@ -8497,10 +8609,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
// If there is an index by shared function info, all the better.
Deoptimizer::DeoptimizeFunction(*function);
}
- // Evict optimized code for this function from the cache so that it doesn't
- // get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
return isolate->heap()->undefined_value();
}
@@ -8525,7 +8633,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
- unoptimized->ClearTypeFeedbackCells(isolate->heap());
+ unoptimized->ClearTypeFeedbackInfo(isolate->heap());
}
return isolate->heap()->undefined_value();
}
@@ -8587,7 +8695,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- ASSERT(!function->IsOptimized());
function->shared()->set_optimization_disabled(true);
return isolate->heap()->undefined_value();
}
@@ -8630,6 +8737,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
isolate->optimizing_compiler_thread()->Unblock();
return isolate->heap()->undefined_value();
}
@@ -8768,7 +8876,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- function->ReplaceCode(function->shared()->code());
+ if (!function->IsOptimized()) {
+ function->ReplaceCode(function->shared()->code());
+ }
return NULL;
}
@@ -8869,6 +8979,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
for (int i = 0; i < argc; ++i) {
argv[i] = Object::GetElement(isolate, arguments, offset + i);
+ RETURN_IF_EMPTY_HANDLE(isolate, argv[i]);
}
bool threw;
@@ -8896,7 +9007,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewGlobalContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -8915,7 +9026,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_NewFunctionContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -8925,7 +9036,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushWithContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSReceiver* extension_object;
@@ -8969,7 +9080,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushCatchContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
@@ -8995,7 +9106,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushBlockContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
@@ -9027,7 +9138,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PushModuleContext) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(index, 0);
@@ -9062,7 +9173,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeclareModules) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
@@ -9082,7 +9193,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
case VAR:
case LET:
case CONST:
- case CONST_HARMONY: {
+ case CONST_LEGACY: {
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
@@ -9095,7 +9206,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
case MODULE: {
Object* referenced_context = Context::cast(host_context)->get(index);
Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
+ JSReceiver::SetProperty(module, name, value, FROZEN, STRICT);
break;
}
case INTERNAL:
@@ -9115,7 +9226,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_DeleteContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -9301,26 +9412,24 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlot) {
return LoadContextSlotHelper(args, isolate, true);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_LoadContextSlotNoReferenceError) {
return LoadContextSlotHelper(args, isolate, false);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StoreContextSlot) {
HandleScope scope(isolate);
ASSERT(args.length() == 4);
Handle<Object> value(args[0], isolate);
CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
- StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
+ CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 3);
int index;
PropertyAttributes attributes;
@@ -9347,7 +9456,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
if ((attributes & READ_ONLY) == 0) {
// Context is a fixed array and set cannot fail.
context->set(index, *value);
- } else if (strict_mode == kStrictMode) {
+ } else if (strict_mode == STRICT) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError("strict_cannot_assign",
@@ -9369,25 +9478,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
// The property was not found.
ASSERT(attributes == ABSENT);
- if (strict_mode == kStrictMode) {
+ if (strict_mode == STRICT) {
// Throw in strict mode (assignment to undefined variable).
Handle<Object> error =
isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
return isolate->Throw(*error);
}
- // In non-strict mode, the property is added to the global object.
+ // In sloppy mode, the property is added to the global object.
attributes = NONE;
object = Handle<JSReceiver>(isolate->context()->global_object());
}
// Set the property if it's not read only or doesn't yet exist.
if ((attributes & READ_ONLY) == 0 ||
- (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
+ (JSReceiver::GetLocalPropertyAttribute(object, name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
isolate,
JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
- } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
+ } else if (strict_mode == STRICT && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
isolate->factory()->NewTypeError(
@@ -9398,7 +9507,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Throw) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9406,7 +9515,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ReThrow) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9414,14 +9523,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_PromoteScheduledException) {
SealHandleScope shs(isolate);
ASSERT_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowReferenceError) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -9433,7 +9542,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowNotDateError) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
return isolate->Throw(*isolate->factory()->NewTypeError(
@@ -9441,19 +9550,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowMessage) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ThrowMessage) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(message_id, 0);
const char* message = GetBailoutReason(
static_cast<BailoutReason>(message_id));
- Handle<Name> message_handle =
+ Handle<String> message_handle =
isolate->factory()->NewStringFromAscii(CStrVector(message));
+ RETURN_IF_EMPTY_HANDLE(isolate, message_handle);
return isolate->Throw(*message_handle);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_StackGuard) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -9466,7 +9576,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_TryInstallOptimizedCode) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -9483,7 +9593,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Interrupt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::HandleStackGuardInterrupt(isolate);
@@ -9628,8 +9738,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
- const char* zone = OS::LocalTimezone(static_cast<double>(time));
+ const char* zone =
+ isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
}
@@ -9645,6 +9755,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCacheVersion) {
+ HandleScope hs(isolate);
+ ASSERT(args.length() == 0);
+ if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
+ Handle<FixedArray> date_cache_version =
+ isolate->factory()->NewFixedArray(1, TENURED);
+ date_cache_version->set(0, Smi::FromInt(0));
+ isolate->eternal_handles()->CreateSingleton(
+ isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
+ }
+ Handle<FixedArray> date_cache_version =
+ Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
+ EternalHandles::DATE_CACHE_VERSION));
+ // Return result as a JS array.
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->array_function());
+ JSArray::SetContent(Handle<JSArray>::cast(result), date_cache_version);
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -9726,7 +9857,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
ParseRestriction restriction = function_literal_only
? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
Handle<JSFunction> fun = Compiler::GetFunctionFromEval(
- source, context, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
+ source, context, SLOPPY, restriction, RelocInfo::kNoPosition);
RETURN_IF_EMPTY_HANDLE(isolate, fun);
return *fun;
}
@@ -9735,7 +9866,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
static ObjectPair CompileGlobalEval(Isolate* isolate,
Handle<String> source,
Handle<Object> receiver,
- LanguageMode language_mode,
+ StrictMode strict_mode,
int scope_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
@@ -9755,14 +9886,14 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
// and return the compiled function bound in the local context.
static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
Handle<JSFunction> compiled = Compiler::GetFunctionFromEval(
- source, context, language_mode, restriction, scope_position);
+ source, context, strict_mode, restriction, scope_position);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, compiled,
MakePair(Failure::Exception(), NULL));
return MakePair(*compiled, *receiver);
}
-RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
+RUNTIME_FUNCTION(ObjectPair, RuntimeHidden_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
@@ -9778,12 +9909,14 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
return MakePair(*callee, isolate->heap()->undefined_value());
}
- CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
+ ASSERT(args[3]->IsSmi());
+ ASSERT(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
+ StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
ASSERT(args[4]->IsSmi());
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- language_mode,
+ strict_mode,
args.smi_at(4));
}
@@ -9811,7 +9944,7 @@ static MaybeObject* Allocate(Isolate* isolate,
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInNewSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -9819,7 +9952,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_AllocateInTargetSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -9848,7 +9981,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
// Strict not needed. Used for cycle detection in Array join implementation.
RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length,
element,
- kNonStrictMode,
+ SLOPPY,
true));
return isolate->heap()->true_value();
}
@@ -9930,11 +10063,9 @@ class ArrayConcatVisitor {
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
Handle<Map> map;
if (fast_elements_) {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- FAST_HOLEY_ELEMENTS);
+ map = JSObject::GetElementsTransitionMap(array, FAST_HOLEY_ELEMENTS);
} else {
- map = isolate_->factory()->GetElementsTransitionMap(array,
- DICTIONARY_ELEMENTS);
+ map = JSObject::GetElementsTransitionMap(array, DICTIONARY_ELEMENTS);
}
array->set_map(*map);
array->set_length(*length);
@@ -10033,7 +10164,7 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
@@ -10468,7 +10599,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<JSArray> array = isolate->factory()->NewJSArray(0);
Smi* length = Smi::FromInt(j);
Handle<Map> map;
- map = isolate->factory()->GetElementsTransitionMap(array, kind);
+ map = JSObject::GetElementsTransitionMap(array, kind);
array->set_map(*map);
array->set_length(length);
array->set_elements(*double_storage);
@@ -10532,6 +10663,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// and are followed by non-existing element. Does not change the length
// property.
// Returns the number of non-undefined elements collected.
+// Returns -1 if hole removal is not supported by this method.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
@@ -10792,14 +10924,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
- Object* element_or_char;
- { MaybeObject* maybe_element_or_char =
- Runtime::GetElementOrCharAt(isolate, obj, index);
- if (!maybe_element_or_char->ToObject(&element_or_char)) {
- return maybe_element_or_char;
- }
- }
- details->set(0, element_or_char);
+ Handle<Object> element_or_char =
+ Runtime::GetElementOrCharAt(isolate, obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, element_or_char);
+ details->set(0, *element_or_char);
details->set(
1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -10935,8 +11063,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
RUNTIME_ASSERT(obj->HasIndexedInterceptor());
CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
-
- return obj->GetElementWithInterceptor(*obj, index);
+ Handle<Object> result = JSObject::GetElementWithInterceptor(obj, obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -11178,8 +11307,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
VariableMode mode;
InitializationFlag init_flag;
locals->set(i * 2, *name);
- locals->set(i * 2 + 1, context->get(
- scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
+ int context_slot_index =
+ scope_info->ContextSlotIndex(*name, &mode, &init_flag);
+ Object* value = context->get(context_slot_index);
+ locals->set(i * 2 + 1, value);
}
}
@@ -11320,7 +11451,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
if (!receiver->IsJSObject() &&
- shared->is_classic_mode() &&
+ shared->strict_mode() == SLOPPY &&
!function->IsBuiltin()) {
// If the receiver is not a JSObject and the function is not a
// builtin or strict-mode we have hit an optimization where a
@@ -11346,6 +11477,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
+static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
+ int index) {
+ VariableMode mode;
+ InitializationFlag flag;
+ return info->ContextSlotIndex(info->ParameterName(index), &mode, &flag) != -1;
+}
+
+
// Create a plain JSObject which materializes the local scope for the specified
// frame.
static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
@@ -11358,22 +11497,20 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- Handle<String> name(scope_info->ParameterName(i));
- VariableMode mode;
- InitializationFlag init_flag;
// Do not materialize the parameter if it is shadowed by a context local.
- if (scope_info->ContextSlotIndex(*name, &mode, &init_flag) != -1) continue;
+ if (ParameterIsShadowedByContextLocal(scope_info, i)) continue;
+ HandleScope scope(isolate);
Handle<Object> value(i < frame_inspector->GetParametersCount()
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
isolate);
ASSERT(!value->IsTheHole());
+ Handle<String> name(scope_info->ParameterName(i));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- Runtime::SetObjectProperty(
- isolate, target, name, value, NONE, kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
Handle<JSObject>());
}
@@ -11385,8 +11522,7 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- Runtime::SetObjectProperty(
- isolate, target, name, value, NONE, kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
Handle<JSObject>());
}
@@ -11411,10 +11547,13 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Shadowed parameters were not materialized.
+ if (ParameterIsShadowedByContextLocal(scope_info, i)) continue;
+
ASSERT(!frame->GetParameter(i)->IsTheHole());
HandleScope scope(isolate);
- Handle<Object> value = GetProperty(
- isolate, target, Handle<String>(scope_info->ParameterName(i)));
+ Handle<String> name(scope_info->ParameterName(i));
+ Handle<Object> value = GetProperty(isolate, target, name);
frame->SetParameterValue(i, *value);
}
@@ -11469,7 +11608,7 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
key,
GetProperty(isolate, ext, key),
NONE,
- kNonStrictMode),
+ SLOPPY),
Handle<JSObject>());
}
}
@@ -11570,8 +11709,7 @@ static bool SetLocalVariableValue(Isolate* isolate,
// We don't expect this to do anything except replacing
// property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY);
return true;
}
}
@@ -11619,8 +11757,7 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate,
Runtime::SetObjectProperty(isolate, closure_scope, key,
GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ NONE, SLOPPY),
Handle<JSObject>());
}
}
@@ -11652,8 +11789,7 @@ static bool SetClosureVariableValue(Isolate* isolate,
if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- NONE,
- kNonStrictMode);
+ NONE, SLOPPY);
return true;
}
}
@@ -11675,8 +11811,7 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
- NONE,
- kNonStrictMode),
+ NONE, SLOPPY),
Handle<JSObject>());
return catch_scope;
}
@@ -11760,7 +11895,8 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_jsframe_index)
+ int inlined_jsframe_index,
+ bool ignore_nested_scopes = false)
: isolate_(isolate),
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index),
@@ -11784,19 +11920,31 @@ class ScopeIterator {
// Return if ensuring debug info failed.
return;
}
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
- if (break_location_iterator.IsExit()) {
- // We are within the return sequence. At the momemt it is not possible to
+ // Currently it takes too much time to find nested scopes due to script
+ // parsing. Sometimes we want to run the ScopeIterator as fast as possible
+ // (for example, while collecting async call stacks on every
+ // addEventListener call), even if we drop some nested scopes.
+ // Later we may optimize getting the nested scopes (cache the result?)
+ // and include nested scopes into the "fast" iteration case as well.
+ if (!ignore_nested_scopes) {
+ Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
+
+ // Find the break point where execution has stopped.
+ BreakLocationIterator break_location_iterator(debug_info,
+ ALL_BREAK_LOCATIONS);
+ // pc points to the instruction after the current one, possibly a break
+ // location as well. So the "- 1" to exclude it from the search.
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+
+ // Within the return sequence at the moment it is not possible to
// get a source position which is consistent with the current scope chain.
// Thus all nested with, catch and block contexts are skipped and we only
// provide the function scope.
+ ignore_nested_scopes = break_location_iterator.IsExit();
+ }
+
+ if (ignore_nested_scopes) {
if (scope_info->HasContext()) {
context_ = Handle<Context>(context_->declaration_context(), isolate_);
} else {
@@ -11804,7 +11952,7 @@ class ScopeIterator {
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->scope_type() != EVAL_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
} else {
@@ -12189,7 +12337,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
Smi* position_value = Smi::FromInt(break_location_iterator.position());
JSObject::SetElement(array, len,
Handle<Object>(position_value, isolate),
- NONE, kNonStrictMode);
+ NONE, SLOPPY);
len++;
}
}
@@ -12209,7 +12357,7 @@ static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
-static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
+static Handle<JSObject> MaterializeScopeDetails(Isolate* isolate,
ScopeIterator* it) {
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
@@ -12218,10 +12366,10 @@ static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
Handle<JSObject> scope_object = it->ScopeObject();
- RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, scope_object, Handle<JSObject>());
details->set(kScopeDetailsObjectIndex, *scope_object);
- return *isolate->factory()->NewJSArrayWithElements(details);
+ return isolate->factory()->NewJSArrayWithElements(details);
}
@@ -12262,7 +12410,58 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
if (it.Done()) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ return *details;
+}
+
+
+// Return an array of scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: inlined frame index
+// args[3]: boolean: ignore nested scopes
+//
+// The array returned contains arrays with the following information:
+// 0: Scope type
+// 1: Scope object
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAllScopesDetails) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3 || args.length() == 4);
+
+ // Check arguments.
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+
+ bool ignore_nested_scopes = false;
+ if (args.length() == 4) {
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
+ ignore_nested_scopes = flag;
+ }
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ List<Handle<JSObject> > result(4);
+ ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes);
+ for (; !it.Done(); it.Next()) {
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ result.Add(details);
+ }
+
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(result.length());
+ for (int i = 0; i < result.length(); ++i) {
+ array->set(i, *result[i]);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(array);
}
@@ -12301,7 +12500,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
return isolate->heap()->undefined_value();
}
- return MaterializeScopeDetails(isolate, &it);
+ Handle<JSObject> details = MaterializeScopeDetails(isolate, &it);
+ RETURN_IF_EMPTY_HANDLE(isolate, details);
+ return *details;
}
@@ -12698,7 +12899,7 @@ static Handle<JSObject> MaterializeArgumentsObject(
isolate->factory()->arguments_string(),
arguments,
::NONE,
- kNonStrictMode);
+ SLOPPY);
return target;
}
@@ -12718,7 +12919,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
Handle<JSFunction> eval_fun =
Compiler::GetFunctionFromEval(source,
context,
- CLASSIC_MODE,
+ SLOPPY,
NO_PARSE_RESTRICTION,
RelocInfo::kNoPosition);
RETURN_IF_EMPTY_HANDLE(isolate, eval_fun);
@@ -12873,7 +13074,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
// Return result as a JS array.
Handle<JSObject> result =
isolate->factory()->NewJSObject(isolate->array_function());
- isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
return *result;
}
@@ -12954,20 +13155,20 @@ static int DebugReferencedBy(HeapIterator* iterator,
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugReferencedBy");
+ Heap* heap = isolate->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugReferencedBy");
// The heap iterator reserves the right to do a GC to make the heap iterable.
// Due to the GC above we know it won't need to do that, but it seems cleaner
// to get the heap iterator constructed before we start having unprotected
// Object* locals that are not protected by handles.
// Check parameters.
- CONVERT_ARG_CHECKED(JSObject, target, 0);
- Object* instance_filter = args[1];
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ Handle<Object> instance_filter = args.at<Object>(1);
RUNTIME_ASSERT(instance_filter->IsUndefined() ||
instance_filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
@@ -12975,40 +13176,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
+ Handle<JSObject> arguments_boilerplate(
+ isolate->context()->native_context()->sloppy_arguments_boilerplate());
+ Handle<JSFunction> arguments_function(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Get the number of referencing objects.
int count;
- Heap* heap = isolate->heap();
HeapIterator heap_iterator(heap);
count = DebugReferencedBy(&heap_iterator,
- target, instance_filter, max_references,
- NULL, 0, arguments_function);
+ *target, *instance_filter, max_references,
+ NULL, 0, *arguments_function);
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
// Fill the referencing objects.
// AllocateFixedArray above does not make the heap non-iterable.
ASSERT(heap->IsHeapIterable());
HeapIterator heap_iterator2(heap);
count = DebugReferencedBy(&heap_iterator2,
- target, instance_filter, max_references,
- instances, count, arguments_function);
+ *target, *instance_filter, max_references,
+ *instances, count, *arguments_function);
// Return result as JS array.
- Object* result;
- MaybeObject* maybe_result = heap->AllocateJSObject(
+ Handle<JSFunction> constructor(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- return JSArray::cast(result)->SetContent(instances);
+
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
@@ -13048,7 +13245,7 @@ static int DebugConstructedBy(HeapIterator* iterator,
// args[0]: the constructor to find instances of
// args[1]: the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
@@ -13056,7 +13253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
// Check parameters.
- CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
RUNTIME_ASSERT(max_references >= 0);
@@ -13064,34 +13261,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
int count;
HeapIterator heap_iterator(heap);
count = DebugConstructedBy(&heap_iterator,
- constructor,
+ *constructor,
max_references,
NULL,
0);
// Allocate an array to hold the result.
- Object* object;
- { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- FixedArray* instances = FixedArray::cast(object);
+ Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
- ASSERT(isolate->heap()->IsHeapIterable());
+ ASSERT(heap->IsHeapIterable());
// Fill the referencing objects.
HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
- constructor,
+ *constructor,
max_references,
- instances,
+ *instances,
count);
// Return result as JS array.
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ Handle<JSFunction> array_function(
isolate->context()->native_context()->array_function());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return JSArray::cast(result)->SetContent(instances);
+ Handle<JSObject> result = isolate->factory()->NewJSObject(array_function);
+ JSArray::SetContent(Handle<JSArray>::cast(result), instances);
+ return *result;
}
@@ -13669,14 +13861,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
Handle<Name> base =
isolate->factory()->NewStringFromAscii(CStrVector("base"));
for (unsigned int i = 0; i < length; ++i) {
- MaybeObject* maybe_string = input->GetElement(isolate, i);
- Object* locale_id;
- if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
+ Handle<Object> locale_id = Object::GetElement(isolate, input, i);
+ RETURN_IF_EMPTY_HANDLE(isolate, locale_id);
+ if (!locale_id->IsString()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
v8::String::Utf8Value utf8_locale_id(
- v8::Utils::ToLocal(Handle<String>(String::cast(locale_id))));
+ v8::Utils::ToLocal(Handle<String>::cast(locale_id)));
UErrorCode error = U_ZERO_ERROR;
@@ -14316,9 +14508,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n",
- reinterpret_cast<char*>(args[0]) + args.smi_at(1));
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ OS::PrintError("abort: %s\n", message);
isolate->PrintStack(stderr);
OS::Abort();
UNREACHABLE();
@@ -14372,7 +14566,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_GetFromCache) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
@@ -14494,8 +14688,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
#define COUNT_ENTRY(Name, argc, ressize) + 1
int entry_count = 0
RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_FUNCTION_LIST(COUNT_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(COUNT_ENTRY);
+ RUNTIME_HIDDEN_FUNCTION_LIST(COUNT_ENTRY)
+ INLINE_FUNCTION_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY
Factory* factory = isolate->factory();
Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
@@ -14521,9 +14715,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
}
inline_runtime_functions = false;
RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+ // Calling hidden runtime functions should just throw.
+ RUNTIME_HIDDEN_FUNCTION_LIST(ADD_ENTRY)
inline_runtime_functions = true;
INLINE_FUNCTION_LIST(ADD_ENTRY)
- INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
ASSERT_EQ(index, entry_count);
Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
@@ -14532,16 +14727,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
#endif
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_Log) {
+ HandleScope handle_scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(String, format, 0);
- CONVERT_ARG_CHECKED(JSArray, elms, 1);
- DisallowHeapAllocation no_gc;
- String::FlatContent format_content = format->GetFlatContent();
- RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const uint8_t> chars = format_content.ToOneByteVector();
- isolate->logger()->LogRuntime(Vector<const char>::cast(chars), elms);
+ CONVERT_ARG_HANDLE_CHECKED(String, format, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, elms, 1);
+
+ SmartArrayPointer<char> format_chars = format->ToCString();
+ isolate->logger()->LogRuntime(
+ Vector<const char>(format_chars.get(), format->length()), elms);
return isolate->heap()->undefined_value();
}
@@ -14564,7 +14758,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(NonStrictArgumentsElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
@@ -14583,6 +14777,17 @@ TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFixed##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+ }
+
+TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
+
+#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -14647,6 +14852,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunMicrotasks) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ if (isolate->microtask_pending())
+ Execution::RunMicrotasks(isolate);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetMicrotaskState) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->microtask_state();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -14685,12 +14906,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- ASSERT(object->IsAccessCheckNeeded());
+ ASSERT(object->map()->is_access_check_needed());
Handle<Object> key = args.at<Object>(2);
SaveContext save(isolate);
isolate->set_context(observer->context());
- if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
- v8::ACCESS_KEYS)) {
+ if (!isolate->MayNamedAccessWrapper(object,
+ isolate->factory()->undefined_value(),
+ v8::ACCESS_KEYS)) {
return isolate->heap()->false_value();
}
bool access_allowed = false;
@@ -14698,11 +14920,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
if (key->ToArrayIndex(&index) ||
(key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
access_allowed =
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
- isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
+ isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_GET) &&
+ isolate->MayIndexedAccessWrapper(object, index, v8::ACCESS_HAS);
} else {
- access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
- isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
+ access_allowed =
+ isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_GET) &&
+ isolate->MayNamedAccessWrapper(object, key, v8::ACCESS_HAS);
}
return isolate->heap()->ToBoolean(access_allowed);
}
@@ -14712,12 +14935,14 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<AllocationSite> site,
Arguments* caller_args) {
+ Factory* factory = isolate->factory();
+
bool holey = false;
bool can_use_type_feedback = true;
if (caller_args->length() == 1) {
- Object* argument_one = (*caller_args)[0];
+ Handle<Object> argument_one = caller_args->at<Object>(0);
if (argument_one->IsSmi()) {
- int value = Smi::cast(argument_one)->value();
+ int value = Handle<Smi>::cast(argument_one)->value();
if (value < 0 || value >= JSObject::kInitialMaxFastElementArray) {
// the array is a dictionary in this case.
can_use_type_feedback = false;
@@ -14730,8 +14955,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
}
}
- JSArray* array;
- MaybeObject* maybe_array;
+ Handle<JSArray> array;
if (!site.is_null() && can_use_type_feedback) {
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
@@ -14740,27 +14964,40 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
site->SetElementsKind(to_kind);
}
- maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
- *constructor, site);
- if (!maybe_array->To(&array)) return maybe_array;
+ // We should allocate with an initial map that reflects the allocation site
+ // advice. Therefore we use AllocateJSObjectFromMap instead of passing
+ // the constructor.
+ Handle<Map> initial_map(constructor->initial_map(), isolate);
+ if (to_kind != initial_map->elements_kind()) {
+ initial_map = Map::AsElementsKind(initial_map, to_kind);
+ RETURN_IF_EMPTY_HANDLE(isolate, initial_map);
+ }
+
+ // If we don't care to track arrays of to_kind ElementsKind, then
+ // don't emit a memento for them.
+ Handle<AllocationSite> allocation_site;
+ if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ allocation_site = site;
+ }
+
+ array = Handle<JSArray>::cast(factory->NewJSObjectFromMap(
+ initial_map, NOT_TENURED, true, allocation_site));
} else {
- maybe_array = isolate->heap()->AllocateJSObject(*constructor);
- if (!maybe_array->To(&array)) return maybe_array;
+ array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
+
// We might need to transition to holey
ElementsKind kind = constructor->initial_map()->elements_kind();
if (holey && !IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
- maybe_array = array->TransitionElementsKind(kind);
- if (maybe_array->IsFailure()) return maybe_array;
+ JSObject::TransitionElementsKind(array, kind);
}
}
- maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
+ factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
+
ElementsKind old_kind = array->GetElementsKind();
- maybe_array = ArrayConstructInitializeElements(array, caller_args);
- if (maybe_array->IsFailure()) return maybe_array;
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ ArrayConstructInitializeElements(array, caller_args));
if (!site.is_null() &&
(old_kind != array->GetElementsKind() ||
!can_use_type_feedback)) {
@@ -14769,11 +15006,11 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
// We must mark the allocationsite as un-inlinable.
site->SetDoNotInlineCall();
}
- return array;
+ return *array;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_ArrayConstructor) {
HandleScope scope(isolate);
// If we get 2 arguments then they are the stub parameters (constructor, type
// info). If we get 4, then the first one is a pointer to the arguments
@@ -14810,7 +15047,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
+RUNTIME_FUNCTION(MaybeObject*, RuntimeHidden_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 1;
@@ -14846,25 +15083,44 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define FH(name, number_of_args, result_size) \
+ { Runtime::kHidden##name, Runtime::RUNTIME_HIDDEN, NULL, \
+ FUNCTION_ADDR(RuntimeHidden_##name), number_of_args, result_size },
+
+
#define I(name, number_of_args, result_size) \
{ Runtime::kInline##name, Runtime::INLINE, \
"_" #name, NULL, number_of_args, result_size },
+
+#define IO(name, number_of_args, result_size) \
+ { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \
+ "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+
+
static const Runtime::Function kIntrinsicFunctions[] = {
RUNTIME_FUNCTION_LIST(F)
+ RUNTIME_HIDDEN_FUNCTION_LIST(FH)
INLINE_FUNCTION_LIST(I)
- INLINE_RUNTIME_FUNCTION_LIST(I)
+ INLINE_OPTIMIZED_FUNCTION_LIST(IO)
};
+#undef IO
+#undef I
+#undef FH
+#undef F
+
MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
Object* dictionary) {
ASSERT(dictionary != NULL);
ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
+ const char* name = kIntrinsicFunctions[i].name;
+ if (name == NULL) continue;
Object* name_string;
{ MaybeObject* maybe_name_string =
- heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
+ heap->InternalizeUtf8String(name);
if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
}
NameDictionary* name_dictionary = NameDictionary::cast(dictionary);
@@ -14921,4 +15177,9 @@ void Runtime::PerformGC(Object* result, Isolate* isolate) {
}
+void Runtime::OutOfMemory() {
+ Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);
+ UNREACHABLE();
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index 0506e9d86..58cd5259c 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -64,7 +64,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
- F(IsClassicModeFunction, 1, 1) \
+ F(IsSloppyModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
F(GetPrototype, 1, 1) \
@@ -83,13 +83,6 @@ namespace internal {
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
- F(NewArgumentsFast, 3, 1) \
- F(NewStrictArgumentsFast, 3, 1) \
- F(CompileUnoptimized, 1, 1) \
- F(CompileOptimized, 2, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(NotifyStubFailure, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
@@ -101,8 +94,6 @@ namespace internal {
F(UnblockConcurrentRecompilation, 0, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
F(SetNativeFlag, 1, 1) \
F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
@@ -111,7 +102,6 @@ namespace internal {
F(FlattenString, 1, 1) \
F(TryMigrateInstance, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
- F(MaxSmi, 0, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -131,15 +121,10 @@ namespace internal {
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
\
- F(NumberToString, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
- F(NumberToPositiveInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(AllocateHeapNumber, 0, 1) \
\
/* Arithmetic operations */ \
F(NumberAdd, 2, 1) \
@@ -151,7 +136,6 @@ namespace internal {
F(NumberAlloc, 0, 1) \
F(NumberImul, 2, 1) \
\
- F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
F(SparseJoinWithSeparator, 3, 1) \
@@ -171,27 +155,25 @@ namespace internal {
\
F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
- F(StringCompare, 2, 1) \
\
/* Math */ \
F(Math_acos, 1, 1) \
F(Math_asin, 1, 1) \
F(Math_atan, 1, 1) \
- F(Math_atan2, 2, 1) \
+ F(Math_log, 1, 1) \
+ F(Math_sqrt, 1, 1) \
F(Math_exp, 1, 1) \
F(Math_floor, 1, 1) \
- F(Math_log, 1, 1) \
F(Math_pow, 2, 1) \
F(Math_pow_cfunction, 2, 1) \
+ F(Math_atan2, 2, 1) \
F(RoundNumber, 1, 1) \
- F(Math_sqrt, 1, 1) \
+ F(Math_fround, 1, 1) \
\
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
- F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeObject, 5, 1) \
- F(RegExpConstructResult, 3, 1) \
\
/* JSON */ \
F(ParseJson, 1, 1) \
@@ -199,11 +181,9 @@ namespace internal {
F(QuoteJSONString, 1, 1) \
\
/* Strings */ \
- F(StringCharCodeAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
@@ -246,7 +226,6 @@ namespace internal {
F(GetAndClearOverflowedStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
- F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
F(SetExpectedNumberOfProperties, 2, 1) \
\
@@ -264,57 +243,47 @@ namespace internal {
F(DateToUTC, 1, 1) \
F(DateMakeDay, 2, 1) \
F(DateSetValue, 3, 1) \
- \
- /* Numbers */ \
+ F(DateCacheVersion, 0, 1) \
\
/* Globals */ \
F(CompileString, 2, 1) \
- F(GlobalPrint, 1, 1) \
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(IsAttachedGlobal, 1, 1) \
- F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
F(GetDataProperty, 2, 1) \
+ F(SetHiddenProperty, 3, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
F(GetArrayKeys, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
- F(ArrayConstructor, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
\
/* Getters and Setters */ \
F(LookupAccessor, 3, 1) \
\
- /* Literals */ \
- F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1) \
- \
- /* Harmony generators */ \
- F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
- F(ResumeJSGeneratorObject, 3, 1) \
- F(ThrowGeneratorStateError, 1, 1) \
- \
/* ES5 */ \
F(ObjectFreeze, 1, 1) \
\
+ /* Harmony microtasks */ \
+ F(GetMicrotaskState, 0, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
/* Harmony symbols */ \
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
- F(SymbolName, 1, 1) \
+ F(CreateGlobalPrivateSymbol, 1, 1) \
+ F(NewSymbolWrapper, 1, 1) \
+ F(SymbolDescription, 1, 1) \
+ F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1) \
\
/* Harmony proxies */ \
@@ -351,6 +320,7 @@ namespace internal {
\
/* Harmony events */ \
F(SetMicrotaskPending, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
\
/* Harmony observe */ \
F(IsObserved, 1, 1) \
@@ -367,7 +337,6 @@ namespace internal {
F(ArrayBufferIsView, 1, 1) \
F(ArrayBufferNeuter, 1, 1) \
\
- F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArrayGetByteLength, 1, 1) \
@@ -375,7 +344,6 @@ namespace internal {
F(TypedArrayGetLength, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
\
- F(DataViewInitialize, 4, 1) \
F(DataViewGetBuffer, 1, 1) \
F(DataViewGetByteLength, 1, 1) \
F(DataViewGetByteOffset, 1, 1) \
@@ -398,54 +366,22 @@ namespace internal {
F(DataViewSetFloat64, 4, 1) \
\
/* Statements */ \
- F(NewClosure, 3, 1) \
- F(NewClosureFromStubFailure, 1, 1) \
- F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
- F(FinalizeInstanceSize, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
- F(ThrowMessage, 1, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- \
- /* Contexts */ \
- F(NewGlobalContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeleteContextSlot, 2, 1) \
- F(LoadContextSlot, 2, 2) \
- F(LoadContextSlotNoReferenceError, 2, 2) \
- F(StoreContextSlot, 4, 1) \
\
/* Declarations and initialization */ \
- F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
- F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(InitializeConstContextSlot, 3, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
\
/* Debugging */ \
F(DebugPrint, 1, 1) \
+ F(GlobalPrint, 1, 1) \
F(DebugTrace, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
- F(Abort, 2, 1) \
+ F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
- /* Logging */ \
- F(Log, 2, 1) \
/* ES5 */ \
F(LocalKeys, 1, 1) \
- /* Cache suport */ \
- F(GetFromCache, 2, 1) \
\
/* Message objects */ \
F(MessageGetStartPosition, 1, 1) \
@@ -461,7 +397,7 @@ namespace internal {
F(HasFastDoubleElements, 1, 1) \
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasNonStrictArgumentsElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
F(HasExternalUint8ClampedElements, 1, 1) \
F(HasExternalArrayElements, 1, 1) \
F(HasExternalInt8Elements, 1, 1) \
@@ -472,6 +408,15 @@ namespace internal {
F(HasExternalUint32Elements, 1, 1) \
F(HasExternalFloat32Elements, 1, 1) \
F(HasExternalFloat64Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
@@ -497,6 +442,7 @@ namespace internal {
F(GetScopeCount, 2, 1) \
F(GetStepInPositions, 2, 1) \
F(GetScopeDetails, 4, 1) \
+ F(GetAllScopesDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
F(SetScopeVariableValue, 6, 1) \
@@ -597,6 +543,7 @@ namespace internal {
// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
// either directly by id (via the code generator), or indirectly
// via a native call by name (from within JS code).
+// Entries have the form F(name, number of arguments, number of return values).
#define RUNTIME_FUNCTION_LIST(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
@@ -605,6 +552,90 @@ namespace internal {
RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+// RUNTIME_HIDDEN_FUNCTION_LIST defines all runtime functions accessed
+// by id from the code generator, but not via a native call by name.
+// Entries have the form F(name, number of arguments, number of return values).
+#define RUNTIME_HIDDEN_FUNCTION_LIST(F) \
+ F(NumberToString, 1, 1) \
+ F(RegExpConstructResult, 3, 1) \
+ F(RegExpExec, 4, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(Log, 3, 1) \
+ F(GetFromCache, 2, 1) \
+ \
+ /* Compilation */ \
+ F(CompileUnoptimized, 1, 1) \
+ F(CompileOptimized, 2, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ \
+ /* Utilities */ \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateHeapNumber, 0, 1) \
+ F(NumberToSmi, 1, 1) \
+ F(NumberToStringSkipCache, 1, 1) \
+ \
+ F(NewArgumentsFast, 3, 1) \
+ F(NewStrictArgumentsFast, 3, 1) \
+ \
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
+ F(ResumeJSGeneratorObject, 3, 1) \
+ F(ThrowGeneratorStateError, 1, 1) \
+ \
+ /* Arrays */ \
+ F(ArrayConstructor, -1, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ \
+ /* Literals */ \
+ F(MaterializeRegExpLiteral, 4, 1)\
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
+ \
+ /* Statements */ \
+ F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
+ F(NewObject, 1, 1) \
+ F(NewObjectWithAllocationSite, 2, 1) \
+ F(FinalizeInstanceSize, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowNotDateError, 0, 1) \
+ F(ThrowMessage, 1, 1) \
+ F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ \
+ /* Contexts */ \
+ F(NewGlobalContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(PushModuleContext, 2, 1) \
+ F(DeleteContextSlot, 2, 1) \
+ F(LoadContextSlot, 2, 2) \
+ F(LoadContextSlotNoReferenceError, 2, 2) \
+ F(StoreContextSlot, 4, 1) \
+ \
+ /* Declarations and initialization */ \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
+ F(DeclareContextSlot, 4, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(InitializeConstContextSlot, 3, 1) \
+ \
+ /* Eval */ \
+ F(ResolvePossiblyDirectEval, 5, 2)
+
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code.
@@ -640,15 +671,7 @@ namespace internal {
F(FastAsciiArrayJoin, 2, 1) \
F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1) \
- F(DebugBreakInOptimizedCode, 0, 1)
-
-
-// ----------------------------------------------------------------------------
-// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called for slow cases.
-// Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+ F(DebugBreakInOptimizedCode, 0, 1) \
F(ClassOf, 1, 1) \
F(StringCharCodeAt, 2, 1) \
F(Log, 3, 1) \
@@ -661,6 +684,21 @@ namespace internal {
F(NumberToString, 1, 1)
+// ----------------------------------------------------------------------------
+// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
+// with a native call of the form %_name from within JS code that also have
+// a corresponding runtime function that is called from non-optimized code.
+// Entries have the form F(name, number of arguments, number of return values).
+#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
+ F(DoubleHi, 1, 1) \
+ F(DoubleLo, 1, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(TypedArrayInitialize, 5, 1) \
+ F(DataViewInitialize, 4, 1) \
+ F(MaxSmi, 0, 1) \
+ F(TypedArrayMaxSizeInHeap, 0, 1)
+
+
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
@@ -712,9 +750,14 @@ class Runtime : public AllStatic {
#define F(name, nargs, ressize) k##name,
RUNTIME_FUNCTION_LIST(F)
#undef F
+#define F(name, nargs, ressize) kHidden##name,
+ RUNTIME_HIDDEN_FUNCTION_LIST(F)
+#undef F
#define F(name, nargs, ressize) kInline##name,
INLINE_FUNCTION_LIST(F)
- INLINE_RUNTIME_FUNCTION_LIST(F)
+#undef F
+#define F(name, nargs, ressize) kInlineOptimized##name,
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
#undef F
kNumFunctions,
kFirstInlineFunction = kInlineIsSmi
@@ -722,7 +765,9 @@ class Runtime : public AllStatic {
enum IntrinsicType {
RUNTIME,
- INLINE
+ RUNTIME_HIDDEN,
+ INLINE,
+ INLINE_OPTIMIZED
};
// Intrinsic function descriptor.
@@ -771,14 +816,9 @@ class Runtime : public AllStatic {
// Support getting the characters in a string using [] notation as
// in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
-
- MUST_USE_RESULT static MaybeObject* GetElementOrCharAtOrFail(
- Isolate* isolate,
- Handle<Object> object,
- uint32_t index);
+ static Handle<Object> GetElementOrCharAt(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
static Handle<Object> SetObjectProperty(
Isolate* isolate,
@@ -786,7 +826,7 @@ class Runtime : public AllStatic {
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attr,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
static Handle<Object> ForceSetObjectProperty(
Isolate* isolate,
@@ -848,10 +888,14 @@ class Runtime : public AllStatic {
};
static void ArrayIdToTypeAndSize(int array_id,
- ExternalArrayType *type, size_t *element_size);
+ ExternalArrayType *type,
+ ElementsKind* external_elements_kind,
+ ElementsKind* fixed_elements_kind,
+ size_t *element_size);
  // Helper functions used by stubs.
static void PerformGC(Object* result, Isolate* isolate);
+ static void OutOfMemory();
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
@@ -864,12 +908,12 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
-class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
-class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
+class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
+class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
-class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
-class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
-class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
+class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
+class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
+class DeclareGlobalsStrictMode: public BitField<StrictMode, 2, 1> {};
} } // namespace v8::internal
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 2a949ae8d..a49bc8448 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -75,11 +75,8 @@ function EQUALS(y) {
y = %ToPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
- while (true) {
- if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (!IS_SPEC_OBJECT(y)) return 1; // not equal
- y = %ToPrimitive(y, NO_HINT);
- }
+ if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+ return 1; // not equal
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
@@ -97,6 +94,7 @@ function EQUALS(y) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
if (IS_BOOLEAN(y)) y = %ToNumber(y);
x = %ToPrimitive(x, NO_HINT);
}
@@ -501,7 +499,7 @@ function ToPrimitive(x, hint) {
if (IS_STRING(x)) return x;
// Normal behavior.
if (!IS_SPEC_OBJECT(x)) return x;
- if (IS_SYMBOL_WRAPPER(x)) return %_ValueOf(x);
+ if (IS_SYMBOL_WRAPPER(x)) throw MakeTypeError('symbol_to_primitive', []);
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
}
@@ -548,6 +546,7 @@ function ToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -555,6 +554,7 @@ function NonStringToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
+ if (IS_SYMBOL(x)) throw %MakeTypeError('symbol_to_string', []);
return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
}
@@ -568,9 +568,9 @@ function ToName(x) {
// ECMA-262, section 9.9, page 36.
function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
- if (IS_SYMBOL(x)) return new $Symbol(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
throw %MakeTypeError('undefined_or_null_to_object', []);
}
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
index cb98b6fdc..c6830e690 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/sampler.cc
@@ -54,7 +54,8 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
@@ -97,6 +98,18 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
@@ -146,6 +159,23 @@ typedef struct ucontext {
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+
+#elif defined(__x86_64__)
+// x64 version for Android.
+typedef struct {
+ uint64_t gregs[23];
+ void* fpregs;
+ uint64_t __reserved1[8];
+} mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -226,13 +256,27 @@ class SimulatorHelper {
}
inline void FillRegisters(RegisterState* state) {
+#if V8_TARGET_ARCH_ARM
state->pc = reinterpret_cast<Address>(simulator_->get_pc());
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
-#if V8_TARGET_ARCH_ARM
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ if (simulator_->sp() == 0 || simulator_->fp() == 0) {
+    // It is possible that the simulator is interrupted while it is updating
+    // the sp or fp register. The ARM64 simulator does this in two steps:
+    // first setting it to zero and then setting it to the new value.
+    // Bail out if sp/fp doesn't contain the new value.
+ return;
+ }
+ state->pc = reinterpret_cast<Address>(simulator_->pc());
+ state->sp = reinterpret_cast<Address>(simulator_->sp());
+ state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ state->sp = reinterpret_cast<Address>(simulator_->get_register(
+ Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#endif
@@ -329,6 +373,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
SimulatorHelper helper;
if (!helper.Init(sampler, isolate)) return;
helper.FillRegisters(&state);
+  // It is possible that the simulator is interrupted while it is updating
+  // the sp or fp register. The ARM64 simulator does this in two steps:
+  // first setting it to zero and then setting it to the new value.
+  // Bail out if sp/fp doesn't contain the new value.
+ if (state.sp == 0 || state.fp == 0) return;
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -358,6 +407,11 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_ARM64
+ state.pc = reinterpret_cast<Address>(mcontext.pc);
+ state.sp = reinterpret_cast<Address>(mcontext.sp);
+ // FP is an alias for x29.
+ state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index 26f840b23..48bfd3326 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -35,6 +35,8 @@
#include "char-predicates-inl.h"
#include "conversions-inl.h"
#include "list-inl.h"
+#include "v8.h"
+#include "parser.h"
namespace v8 {
namespace internal {
@@ -246,7 +248,8 @@ Token::Value Scanner::Next() {
}
-static inline bool IsByteOrderMark(uc32 c) {
+// TODO(yangguo): check whether this is actually necessary.
+static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
// Unicode character; this implies that in a Unicode context the
// 0xFF, 0xFE byte pattern can only be interpreted as the U+FEFF
@@ -254,7 +257,7 @@ static inline bool IsByteOrderMark(uc32 c) {
// not be a U+FFFE character expressed in big-endian byte
// order). Nevertheless, we check for it to be compatible with
// Spidermonkey.
- return c == 0xFEFF || c == 0xFFFE;
+ return c == 0xFFFE;
}
@@ -262,14 +265,14 @@ bool Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
- // We treat byte-order marks (BOMs) as whitespace for better
- // compatibility with Spidermonkey and other JavaScript engines.
- while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
- // IsWhiteSpace() includes line terminators!
+ while (true) {
+ // Advance as long as character is a WhiteSpace or LineTerminator.
+ // Remember if the latter is the case.
if (unicode_cache_->IsLineTerminator(c0_)) {
- // Ignore line terminators, but remember them. This is necessary
- // for automatic semicolon insertion.
has_line_terminator_before_next_ = true;
+ } else if (!unicode_cache_->IsWhiteSpace(c0_) &&
+ !IsLittleEndianByteOrderMark(c0_)) {
+ break;
}
Advance();
}
@@ -906,7 +909,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
KEYWORD("yield", Token::YIELD)
-static Token::Value KeywordOrIdentifierToken(const char* input,
+static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
bool harmony_scoping,
bool harmony_modules) {
@@ -981,8 +984,8 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
literal.Complete();
- if (next_.literal_chars->is_ascii()) {
- Vector<const char> chars = next_.literal_chars->ascii_literal();
+ if (next_.literal_chars->is_one_byte()) {
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
harmony_scoping_,
@@ -1113,21 +1116,74 @@ bool Scanner::ScanRegExpFlags() {
}
-int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), true, value);
+Handle<String> Scanner::AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured) {
+ if (is_next_literal_one_byte()) {
+ return isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>::cast(next_literal_one_byte_string()), tenured);
+ } else {
+ return isolate->factory()->NewStringFromTwoByte(
+ next_literal_two_byte_string(), tenured);
+ }
+}
+
+
+Handle<String> Scanner::AllocateInternalizedString(Isolate* isolate) {
+ if (is_literal_one_byte()) {
+ return isolate->factory()->InternalizeOneByteString(
+ literal_one_byte_string());
+ } else {
+ return isolate->factory()->InternalizeTwoByteString(
+ literal_two_byte_string());
+ }
+}
+
+
+double Scanner::DoubleValue() {
+ ASSERT(is_literal_one_byte());
+ return StringToDouble(
+ unicode_cache_, Vector<const char>::cast(literal_one_byte_string()),
+ ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
+}
+
+
+int Scanner::FindNumber(DuplicateFinder* finder, int value) {
+ return finder->AddNumber(literal_one_byte_string(), value);
+}
+
+
+int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+ if (is_literal_one_byte()) {
+ return finder->AddOneByteSymbol(literal_one_byte_string(), value);
+ }
+ return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
+}
+
+
+void Scanner::LogSymbol(ParserRecorder* log, int position) {
+ if (is_literal_one_byte()) {
+ log->LogOneByteSymbol(position, literal_one_byte_string());
+ } else {
+ log->LogTwoByteSymbol(position, literal_two_byte_string());
+ }
+}
+
+
+int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
+ return AddSymbol(key, true, value);
}
-int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
- return AddSymbol(Vector<const byte>::cast(key), false, value);
+int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
}
-int DuplicateFinder::AddSymbol(Vector<const byte> key,
- bool is_ascii,
+int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
+ bool is_one_byte,
int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
+ uint32_t hash = Hash(key, is_one_byte);
+ byte* encoding = BackupKey(key, is_one_byte);
HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
entry->value =
@@ -1136,15 +1192,16 @@ int DuplicateFinder::AddSymbol(Vector<const byte> key,
}
-int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
ASSERT(key.length() > 0);
// Quick check for already being in canonical form.
if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
+ return AddOneByteSymbol(key, value);
}
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ double double_value = StringToDouble(
+ unicode_constants_, Vector<const char>::cast(key), flags, 0.0);
int length;
const char* string;
if (!std::isfinite(double_value)) {
@@ -1160,7 +1217,7 @@ int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
}
-bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
// Test for a safe approximation of number literals that are already
// in canonical form: max 15 digits, no leading zeroes, except an
// integer part that is a single zero, and no trailing zeros below
@@ -1179,7 +1236,7 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
pos++;
bool invalid_last_digit = true;
while (pos < length) {
- byte digit = number[pos] - '0';
+ uint8_t digit = number[pos] - '0';
if (digit > '9' - '0') return false;
invalid_last_digit = (digit == 0);
pos++;
@@ -1188,11 +1245,11 @@ bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
}
-uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
// for strings (except that it's seeded by the length and ASCII-ness).
int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ;
+ uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0) ;
for (int i = 0; i < length; i++) {
uint32_t c = key[i];
hash = (hash + c) * 1025;
@@ -1210,39 +1267,42 @@ bool DuplicateFinder::Match(void* first, void* second) {
// was ASCII.
byte* s1 = reinterpret_cast<byte*>(first);
byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
+ uint32_t length_one_byte_field = 0;
byte c1;
do {
c1 = *s1;
if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ length_one_byte_field = (length_one_byte_field << 7) | (c1 & 0x7f);
s1++;
s2++;
} while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
+ int length = static_cast<int>(length_one_byte_field >> 1);
return memcmp(s1, s2, length) == 0;
}
-byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+byte* DuplicateFinder::BackupKey(Vector<const uint8_t> bytes,
+ bool is_one_byte) {
+ uint32_t one_byte_length = (bytes.length() << 1) | (is_one_byte ? 1 : 0);
backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
+ // Emit one_byte_length as base-128 encoded number, with the 7th bit set
// on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ if (one_byte_length >= (1 << 7)) {
+ if (one_byte_length >= (1 << 14)) {
+ if (one_byte_length >= (1 << 21)) {
+ if (one_byte_length >= (1 << 28)) {
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 28) | 0x80));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 21) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ backing_store_.Add(
+ static_cast<uint8_t>((one_byte_length >> 14) | 0x80u));
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ backing_store_.Add(static_cast<uint8_t>((one_byte_length >> 7) | 0x80u));
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+ backing_store_.Add(static_cast<uint8_t>(one_byte_length & 0x7f));
backing_store_.AddBlock(bytes);
return backing_store_.EndSequence().start();
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 3cefc833a..73026ab5a 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -44,6 +44,9 @@ namespace v8 {
namespace internal {
+class ParserRecorder;
+
+
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -117,8 +120,8 @@ class Utf16CharacterStream {
virtual bool ReadBlock() = 0;
virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
- const uc16* buffer_cursor_;
- const uc16* buffer_end_;
+ const uint16_t* buffer_cursor_;
+ const uint16_t* buffer_end_;
unsigned pos_;
};
@@ -139,12 +142,17 @@ class UnicodeCache {
bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
+ bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
+ return kIsWhiteSpaceOrLineTerminator.get(c);
+ }
private:
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
+ kIsWhiteSpaceOrLineTerminator;
StaticResource<Utf8Decoder> utf8_decoder_;
DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
@@ -161,32 +169,32 @@ class DuplicateFinder {
backing_store_(16),
map_(&Match) { }
- int AddAsciiSymbol(Vector<const char> key, int value);
- int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ int AddOneByteSymbol(Vector<const uint8_t> key, int value);
+ int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
  // Add a number literal by converting it (if necessary)
  // to the string that ToString(ToNumber(literal)) would generate,
  // and then adding that string with AddAsciiSymbol.
// This string is the actual value used as key in an object literal,
// and the one that must be different from the other keys.
- int AddNumber(Vector<const char> key, int value);
+ int AddNumber(Vector<const uint8_t> key, int value);
private:
- int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
// Backs up the key and its length in the backing store.
// The backup is stored with a base 127 encoding of the
- // length (plus a bit saying whether the string is ASCII),
+ // length (plus a bit saying whether the string is one byte),
// followed by the bytes of the key.
- byte* BackupKey(Vector<const byte> key, bool is_ascii);
+ uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
// Compare two encoded keys (both pointing into the backing store)
// for having the same base-127 encoded lengths and ASCII-ness,
// and then having the same 'length' bytes following.
static bool Match(void* first, void* second);
// Creates a hash from a sequence of bytes.
- static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
// Checks whether a string containing a JS number is its canonical
// form.
- static bool IsNumberCanonical(Vector<const char> key);
+ static bool IsNumberCanonical(Vector<const uint8_t> key);
// Size of buffer. Sufficient for using it to call DoubleToCString in
// from conversions.h.
@@ -206,7 +214,7 @@ class DuplicateFinder {
class LiteralBuffer {
public:
- LiteralBuffer() : is_ascii_(true), position_(0), backing_store_() { }
+ LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
~LiteralBuffer() {
if (backing_store_.length() > 0) {
@@ -216,48 +224,48 @@ class LiteralBuffer {
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_ascii_) {
+ if (is_one_byte_) {
if (code_unit <= unibrow::Latin1::kMaxChar) {
backing_store_[position_] = static_cast<byte>(code_unit);
position_ += kOneByteSize;
return;
}
- ConvertToUtf16();
+ ConvertToTwoByte();
}
ASSERT(code_unit < 0x10000u);
- *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
position_ += kUC16Size;
}
- bool is_ascii() { return is_ascii_; }
+ bool is_one_byte() { return is_one_byte_; }
bool is_contextual_keyword(Vector<const char> keyword) {
- return is_ascii() && keyword.length() == position_ &&
+ return is_one_byte() && keyword.length() == position_ &&
(memcmp(keyword.start(), backing_store_.start(), position_) == 0);
}
- Vector<const uc16> utf16_literal() {
- ASSERT(!is_ascii_);
+ Vector<const uint16_t> two_byte_literal() {
+ ASSERT(!is_one_byte_);
ASSERT((position_ & 0x1) == 0);
- return Vector<const uc16>(
- reinterpret_cast<const uc16*>(backing_store_.start()),
+ return Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(backing_store_.start()),
position_ >> 1);
}
- Vector<const char> ascii_literal() {
- ASSERT(is_ascii_);
- return Vector<const char>(
- reinterpret_cast<const char*>(backing_store_.start()),
+ Vector<const uint8_t> one_byte_literal() {
+ ASSERT(is_one_byte_);
+ return Vector<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(backing_store_.start()),
position_);
}
int length() {
- return is_ascii_ ? position_ : (position_ >> 1);
+ return is_one_byte_ ? position_ : (position_ >> 1);
}
void Reset() {
position_ = 0;
- is_ascii_ = true;
+ is_one_byte_ = true;
}
private:
@@ -278,8 +286,8 @@ class LiteralBuffer {
backing_store_ = new_store;
}
- void ConvertToUtf16() {
- ASSERT(is_ascii_);
+ void ConvertToTwoByte() {
+ ASSERT(is_one_byte_);
Vector<byte> new_store;
int new_content_size = position_ * kUC16Size;
if (new_content_size >= backing_store_.length()) {
@@ -290,7 +298,7 @@ class LiteralBuffer {
new_store = backing_store_;
}
uint8_t* src = backing_store_.start();
- uc16* dst = reinterpret_cast<uc16*>(new_store.start());
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
for (int i = position_ - 1; i >= 0; i--) {
dst[i] = src[i];
}
@@ -299,10 +307,10 @@ class LiteralBuffer {
backing_store_ = new_store;
}
position_ = new_content_size;
- is_ascii_ = false;
+ is_one_byte_ = false;
}
- bool is_ascii_;
+ bool is_one_byte_;
int position_;
Vector<byte> backing_store_;
@@ -365,32 +373,13 @@ class Scanner {
// Returns the location information for the current token
// (the token last returned by Next()).
Location location() const { return current_.location; }
- // Returns the literal string, if any, for the current token (the
- // token last returned by Next()). The string is 0-terminated.
- // Literal strings are collected for identifiers, strings, and
- // numbers.
- // These functions only give the correct result if the literal
- // was scanned between calls to StartLiteral() and TerminateLiteral().
- Vector<const char> literal_ascii_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->ascii_literal();
- }
- Vector<const uc16> literal_utf16_string() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->utf16_literal();
- }
- bool is_literal_ascii() {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_ascii();
- }
- bool is_literal_contextual_keyword(Vector<const char> keyword) {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_contextual_keyword(keyword);
- }
- int literal_length() const {
- ASSERT_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
- }
+
+ // Similar functions for the upcoming token.
+
+ // One token look-ahead (past the token returned by Next()).
+ Token::Value peek() const { return next_.token; }
+
+ Location peek_location() const { return next_.location; }
bool literal_contains_escapes() const {
Location location = current_.location;
@@ -401,43 +390,47 @@ class Scanner {
}
return current_.literal_chars->length() != source_length;
}
-
- // Similar functions for the upcoming token.
-
- // One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
-
- Location peek_location() const { return next_.location; }
-
- // Returns the literal string for the next token (the token that
- // would be returned if Next() were called).
- Vector<const char> next_literal_ascii_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->ascii_literal();
- }
- Vector<const uc16> next_literal_utf16_string() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->utf16_literal();
- }
- bool is_next_literal_ascii() {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_ascii();
+ bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_contextual_keyword(keyword);
}
bool is_next_contextual_keyword(Vector<const char> keyword) {
ASSERT_NOT_NULL(next_.literal_chars);
return next_.literal_chars->is_contextual_keyword(keyword);
}
- int next_literal_length() const {
- ASSERT_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->length();
+
+ Handle<String> AllocateNextLiteralString(Isolate* isolate,
+ PretenureFlag tenured);
+ Handle<String> AllocateInternalizedString(Isolate* isolate);
+
+ double DoubleValue();
+ bool UnescapedLiteralMatches(const char* data, int length) {
+ if (is_literal_one_byte() &&
+ literal_length() == length &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ return !strncmp(token, data, length);
+ }
+ return false;
+ }
+ void IsGetOrSet(bool* is_get, bool* is_set) {
+ if (is_literal_one_byte() &&
+ literal_length() == 3 &&
+ !literal_contains_escapes()) {
+ const char* token =
+ reinterpret_cast<const char*>(literal_one_byte_string().start());
+ *is_get = strncmp(token, "get", 3) == 0;
+ *is_set = !*is_get && strncmp(token, "set", 3) == 0;
+ }
}
- UnicodeCache* unicode_cache() { return unicode_cache_; }
+ int FindNumber(DuplicateFinder* finder, int value);
+ int FindSymbol(DuplicateFinder* finder, int value);
- static const int kCharacterLookaheadBufferSize = 1;
+ void LogSymbol(ParserRecorder* log, int position);
- // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
- uc32 ScanOctalEscape(uc32 c, int length);
+ UnicodeCache* unicode_cache() { return unicode_cache_; }
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
@@ -490,6 +483,11 @@ class Scanner {
LiteralBuffer* literal_chars;
};
+ static const int kCharacterLookaheadBufferSize = 1;
+
+ // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+ uc32 ScanOctalEscape(uc32 c, int length);
+
// Call this after setting source_ to the input.
void Init() {
// Set c0_ (one character ahead)
@@ -550,6 +548,47 @@ class Scanner {
}
}
+ // Returns the literal string, if any, for the current token (the
+ // token last returned by Next()). The string is 0-terminated.
+ // Literal strings are collected for identifiers, strings, and
+ // numbers.
+ // These functions only give the correct result if the literal
+ // was scanned between calls to StartLiteral() and TerminateLiteral().
+ Vector<const uint8_t> literal_one_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> literal_two_byte_string() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->two_byte_literal();
+ }
+ bool is_literal_one_byte() {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->is_one_byte();
+ }
+ int literal_length() const {
+ ASSERT_NOT_NULL(current_.literal_chars);
+ return current_.literal_chars->length();
+ }
+ // Returns the literal string for the next token (the token that
+ // would be returned if Next() were called).
+ Vector<const uint8_t> next_literal_one_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->one_byte_literal();
+ }
+ Vector<const uint16_t> next_literal_two_byte_string() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->two_byte_literal();
+ }
+ bool is_next_literal_one_byte() {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->is_one_byte();
+ }
+ int next_literal_length() const {
+ ASSERT_NOT_NULL(next_.literal_chars);
+ return next_.literal_chars->length();
+ }
+
uc32 ScanHexNumber(int expected_length);
// Scans a single JavaScript token.
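
For context on the scanner.h hunks above: the renamed LiteralBuffer keeps a literal in one byte per character until it sees a code unit above the Latin-1 range, then widens the whole buffer. A standalone sketch of that widening, using std::vector instead of V8's Vector<byte> and invented names (this is not the patch's code, just the idea), might look like:

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for LiteralBuffer: starts one-byte, widens to two-byte
// the first time a code unit > 0xFF is added. Names are illustrative only.
class SketchLiteralBuffer {
 public:
  void AddChar(uint32_t code_unit) {
    assert(code_unit < 0x10000u);  // surrogate pairs are handled by the caller
    if (is_one_byte_) {
      if (code_unit <= 0xFF) {
        one_byte_.push_back(static_cast<uint8_t>(code_unit));
        return;
      }
      ConvertToTwoByte();
    }
    two_byte_.push_back(static_cast<uint16_t>(code_unit));
  }

  bool is_one_byte() const { return is_one_byte_; }
  size_t length() const {
    return is_one_byte_ ? one_byte_.size() : two_byte_.size();
  }

 private:
  void ConvertToTwoByte() {
    // Each Latin-1 byte maps to the same code point in UTF-16, so widening
    // is a plain element-by-element copy.
    two_byte_.assign(one_byte_.begin(), one_byte_.end());
    one_byte_.clear();
    is_one_byte_ = false;
  }

  bool is_one_byte_ = true;
  std::vector<uint8_t> one_byte_;
  std::vector<uint16_t> two_byte_;
};

The real buffer widens in place inside its backing store rather than keeping two vectors, but the one-byte/two-byte split and the lazy conversion are the same.
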
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 03e69bf38..e2ae85432 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -78,7 +78,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
- LanguageModeField::encode(scope->language_mode()) |
+ StrictModeField::encode(scope->strict_mode()) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode);
scope_info->SetFlags(flags);
@@ -164,8 +164,8 @@ bool ScopeInfo::CallsEval() {
}
-LanguageMode ScopeInfo::language_mode() {
- return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
+StrictMode ScopeInfo::strict_mode() {
+ return length() > 0 ? StrictModeField::decode(Flags()) : SLOPPY;
}
@@ -378,7 +378,7 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<String>(String::cast(scope_info->get(i))),
Handle<Object>(context->get(context_index), isolate),
::NONE,
- kNonStrictMode);
+ SLOPPY);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false);
}
return true;
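
The flag encoding in ScopeInfo::Create above goes through V8's BitField helpers (ScopeTypeField, CallsEvalField, StrictModeField, ...). A minimal stand-alone approximation of that pattern, with field positions invented purely for illustration, is:

#include <cstdint>

// Minimal BitField-style helper: packs a value of type T into `size` bits
// starting at bit `shift` of a uint32_t. The layout below is made up and
// does not match V8's actual ScopeInfo flag layout.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

enum StrictMode { SLOPPY, STRICT };

using CallsEvalField  = BitField<bool, 0, 1>;
using StrictModeField = BitField<StrictMode, 1, 1>;

// Packing and unpacking mirror ScopeInfo::Create and ScopeInfo::strict_mode():
//   uint32_t flags = CallsEvalField::encode(true) | StrictModeField::encode(STRICT);
//   StrictMode mode = StrictModeField::decode(flags);
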
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 97b67bd5a..bcb643501 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -190,9 +190,8 @@ void Scope::SetDefaults(ScopeType scope_type,
scope_contains_with_ = false;
scope_calls_eval_ = false;
// Inherit the strict mode from the parent scope.
- language_mode_ = (outer_scope != NULL)
- ? outer_scope->language_mode_ : CLASSIC_MODE;
- outer_scope_calls_non_strict_eval_ = false;
+ strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
+ outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
@@ -207,7 +206,7 @@ void Scope::SetDefaults(ScopeType scope_type,
end_position_ = RelocInfo::kNoPosition;
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
- language_mode_ = scope_info->language_mode();
+ strict_mode_ = scope_info->strict_mode();
}
}
@@ -307,7 +306,7 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->SetScope(scope);
+ info->PrepareForCompilation(scope);
return true;
}
@@ -470,7 +469,7 @@ Variable* Scope::DeclareLocal(Handle<String> name,
InitializationFlag init_flag,
Interface* interface) {
ASSERT(!already_resolved());
- // This function handles VAR and CONST modes. DYNAMIC variables are
+ // This function handles VAR, LET, and CONST modes. DYNAMIC variables are
// introduces during variable allocation, INTERNAL variables are allocated
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
ASSERT(IsDeclaredVariableMode(mode));
@@ -643,13 +642,13 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
bool Scope::AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory) {
// 1) Propagate scope information.
- bool outer_scope_calls_non_strict_eval = false;
+ bool outer_scope_calls_sloppy_eval = false;
if (outer_scope_ != NULL) {
- outer_scope_calls_non_strict_eval =
- outer_scope_->outer_scope_calls_non_strict_eval() |
- outer_scope_->calls_non_strict_eval();
+ outer_scope_calls_sloppy_eval =
+ outer_scope_->outer_scope_calls_sloppy_eval() |
+ outer_scope_->calls_sloppy_eval();
}
- PropagateScopeInfo(outer_scope_calls_non_strict_eval);
+ PropagateScopeInfo(outer_scope_calls_sloppy_eval);
// 2) Allocate module instances.
if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
@@ -881,21 +880,14 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- switch (language_mode()) {
- case CLASSIC_MODE:
- break;
- case STRICT_MODE:
- Indent(n1, "// strict mode scope\n");
- break;
- case EXTENDED_MODE:
- Indent(n1, "// extended mode scope\n");
- break;
+ if (strict_mode() == STRICT) {
+ Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (outer_scope_calls_non_strict_eval_) {
- Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+ if (outer_scope_calls_sloppy_eval_) {
+ Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (num_stack_slots_ > 0) { Indent(n1, "// ");
@@ -1017,9 +1009,9 @@ Variable* Scope::LookupRecursive(Handle<String> name,
// object).
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_non_strict_eval()) {
+ } else if (calls_sloppy_eval()) {
// A variable binding may have been found in an outer scope, but the current
- // scope makes a non-strict 'eval' call, so the found variable may not be
+ // scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
// In that case, change the lookup result to reflect this situation.
if (*binding_kind == BOUND) {
@@ -1071,8 +1063,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
break;
case UNBOUND_EVAL_SHADOWED:
- // No binding has been found. But some scope makes a
- // non-strict 'eval' call.
+ // No binding has been found. But some scope makes a sloppy 'eval' call.
var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
break;
@@ -1084,7 +1075,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
ASSERT(var != NULL);
- if (FLAG_harmony_scoping && is_extended_mode() &&
+ if (FLAG_harmony_scoping && strict_mode() == STRICT &&
var->is_const_mode() && proxy->IsLValue()) {
// Assignment to const. Throw a syntax error.
MessageLocation location(
@@ -1123,7 +1114,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
- USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
+ USE(JSObject::SetElement(array, 0, var->name(), NONE, STRICT));
Handle<Object> result =
factory->NewSyntaxError("module_type_error", array);
isolate->Throw(*result, &location);
@@ -1157,16 +1148,16 @@ bool Scope::ResolveVariablesRecursively(
}
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval ) {
- if (outer_scope_calls_non_strict_eval) {
- outer_scope_calls_non_strict_eval_ = true;
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
+ if (outer_scope_calls_sloppy_eval) {
+ outer_scope_calls_sloppy_eval_ = true;
}
- bool calls_non_strict_eval =
- this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
+ bool calls_sloppy_eval =
+ this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_;
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_[i];
- if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
+ if (inner_scope->PropagateScopeInfo(calls_sloppy_eval)) {
inner_scope_calls_eval_ = true;
}
if (inner_scope->force_eager_compilation_) {
@@ -1246,7 +1237,7 @@ void Scope::AllocateParameterLocals() {
Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
- bool uses_nonstrict_arguments = false;
+ bool uses_sloppy_arguments = false;
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
@@ -1265,7 +1256,7 @@ void Scope::AllocateParameterLocals() {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_nonstrict_arguments = is_classic_mode();
+ uses_sloppy_arguments = strict_mode() == SLOPPY;
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1275,7 +1266,7 @@ void Scope::AllocateParameterLocals() {
for (int i = params_.length() - 1; i >= 0; --i) {
Variable* var = params_[i];
ASSERT(var->scope() == this);
- if (uses_nonstrict_arguments) {
+ if (uses_sloppy_arguments) {
// Force context allocation of the parameter.
var->ForceContextAllocation();
}
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 06aaa902c..b0d84343e 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -234,9 +234,7 @@ class Scope: public ZoneObject {
void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
// Set the strict mode flag (unless disabled by a global flag).
- void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
- }
+ void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
// Position in the source where this scope begins and ends.
//
@@ -293,23 +291,17 @@ class Scope: public ZoneObject {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_global_scope();
}
- bool is_classic_mode() const {
- return language_mode() == CLASSIC_MODE;
- }
- bool is_extended_mode() const {
- return language_mode() == EXTENDED_MODE;
- }
- bool is_strict_or_extended_eval_scope() const {
- return is_eval_scope() && !is_classic_mode();
+ bool is_strict_eval_scope() const {
+ return is_eval_scope() && strict_mode_ == STRICT;
}
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
- bool calls_non_strict_eval() {
- return scope_calls_eval_ && is_classic_mode();
+ bool calls_sloppy_eval() {
+ return scope_calls_eval_ && strict_mode_ == SLOPPY;
}
- bool outer_scope_calls_non_strict_eval() const {
- return outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval() const {
+ return outer_scope_calls_sloppy_eval_;
}
// Is this scope inside a with statement.
@@ -324,7 +316,7 @@ class Scope: public ZoneObject {
ScopeType scope_type() const { return scope_type_; }
// The language mode of this scope.
- LanguageMode language_mode() const { return language_mode_; }
+ StrictMode strict_mode() const { return strict_mode_; }
// The variable corresponding the 'this' value.
Variable* receiver() { return receiver_; }
@@ -493,14 +485,14 @@ class Scope: public ZoneObject {
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;
- // The language mode of this scope.
- LanguageMode language_mode_;
+ // The strict mode of this scope.
+ StrictMode strict_mode_;
// Source positions.
int start_position_;
int end_position_;
// Computed via PropagateScopeInfo.
- bool outer_scope_calls_non_strict_eval_;
+ bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -538,13 +530,13 @@ class Scope: public ZoneObject {
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding and no scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call.
+ // binding scope (exclusive) makes a sloppy 'eval' call.
BOUND,
// The variable reference could be statically resolved to a variable binding
// which is returned. There is no 'with' statement between the reference and
// the binding, but some scope between the reference scope (inclusive) and
- // binding scope (exclusive) makes a non-strict 'eval' call, that might
+ // binding scope (exclusive) makes a sloppy 'eval' call, that might
// possibly introduce variable bindings shadowing the found one. Thus the
// found variable binding is just a guess.
BOUND_EVAL_SHADOWED,
@@ -553,13 +545,13 @@ class Scope: public ZoneObject {
// and thus should be considered referencing a global variable. NULL is
// returned. The variable reference is not inside any 'with' statement and
// no scope between the reference scope (inclusive) and global scope
- // (exclusive) makes a non-strict 'eval' call.
+ // (exclusive) makes a sloppy 'eval' call.
UNBOUND,
// The variable reference could not be statically resolved to any binding
// NULL is returned. The variable reference is not inside any 'with'
// statement, but some scope between the reference scope (inclusive) and
- // global scope (exclusive) makes a non-strict 'eval' call, that might
+ // global scope (exclusive) makes a sloppy 'eval' call, that might
// possibly introduce a variable binding. Thus the reference should be
// considered referencing a global variable unless it is shadowed by an
// 'eval' introduced binding.
@@ -591,7 +583,7 @@ class Scope: public ZoneObject {
AstNodeFactory<AstNullVisitor>* factory);
// Scope analysis.
- bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
+ bool PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
bool HasTrivialContext() const;
// Predicates.
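
The scopes.cc hunks above rename the "non-strict eval" propagation to "sloppy eval" but keep its shape: a scope is tainted if it calls eval in sloppy mode or if any enclosing scope does. A toy version of that downward propagation (simplified; the real PropagateScopeInfo also propagates inner_scope_calls_eval_ and eager-compilation flags) could read:

#include <vector>

// Toy scope tree in the spirit of Scope::PropagateScopeInfo above.
struct SketchScope {
  bool calls_eval = false;
  bool strict = false;  // SLOPPY when false
  bool outer_scope_calls_sloppy_eval = false;
  std::vector<SketchScope*> inner_scopes;

  bool calls_sloppy_eval() const { return calls_eval && !strict; }

  void Propagate(bool outer_calls_sloppy_eval) {
    if (outer_calls_sloppy_eval) outer_scope_calls_sloppy_eval = true;
    // Inner scopes see a sloppy eval if this scope performs one or if one
    // was already visible from further out.
    bool sloppy_eval = calls_sloppy_eval() || outer_scope_calls_sloppy_eval;
    for (SketchScope* inner : inner_scopes) inner->Propagate(sloppy_eval);
  }
};
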
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index 5adc2b899..4048886fd 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -175,6 +175,22 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
+#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kHidden##name, \
+ "Runtime::Hidden" #name },
+
+ RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY)
+#undef RUNTIME_HIDDEN_ENTRY
+
+#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
+ { RUNTIME_FUNCTION, \
+ Runtime::kInlineOptimized##name, \
+ "Runtime::" #name },
+
+ INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
+#undef INLINE_OPTIMIZED_ENTRY
+
// IC utilities
#define IC_ENTRY(name) \
{ IC_UTILITY, \
@@ -297,6 +313,11 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
+ // Runtime entries
+ Add(ExternalReference::out_of_memory_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 2,
+ "Runtime::OutOfMemory");
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
@@ -310,11 +331,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
6,
"StoreBuffer::StoreBufferOverflow");
- Add(ExternalReference::
- incremental_evacuation_record_write_function(isolate).address(),
- RUNTIME_ENTRY,
- 7,
- "IncrementalMarking::RecordWrite");
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
@@ -497,11 +513,12 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+ Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
53,
"Runtime::AllocateInNewSpace");
- Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+ Add(ExternalReference(
+ Runtime::kHiddenAllocateInTargetSpace, isolate).address(),
UNCLASSIFIED,
54,
"Runtime::AllocateInTargetSpace");
@@ -789,6 +806,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -987,6 +1005,7 @@ void Deserializer::ReadChunk(Object** current,
reinterpret_cast<Address>(current); \
Assembler::deserialization_set_special_target_at( \
location_of_branch_data, \
+ Code::cast(HeapObject::FromAddress(current_object_address)), \
reinterpret_cast<Address>(new_object)); \
location_of_branch_data += Assembler::kSpecialTargetSize; \
current = reinterpret_cast<Object**>(location_of_branch_data); \
@@ -1148,15 +1167,15 @@ void Deserializer::ReadChunk(Object** current,
// allocation point and write a pointer to it to the current object.
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
- // a pointer to it to the current object. Required only for MIPS, and
- // omitted on the other architectures because it is fully unrolled and
- // would cause bloat.
+ // a pointer to it to the current object. Required only for MIPS or ARM
+ // with ool constant pool, and omitted on the other architectures because
+ // it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS.
+ // object. Required only for MIPS or ARM with ool constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
@@ -1253,7 +1272,6 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
- current_root_index_(0),
external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
@@ -1279,7 +1297,7 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
-
+ isolate->heap()->IterateSmiRoots(this);
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1378,12 +1396,11 @@ int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
if (!root->IsSmi() && root == heap_object) {
-#if V8_TARGET_ARCH_MIPS
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
if (from == kFromCode) {
// In order to avoid code bloat in the deserializer we don't have
// support for the encoding that specifies a particular root should
- // be written into the lui/ori instructions on MIPS. Therefore we
- // should not generate such serialization data for MIPS.
+ // be written from within code.
return kInvalidRootIndex;
}
#endif
@@ -1636,6 +1653,9 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -1681,6 +1701,9 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -1698,6 +1721,9 @@ void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+ // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
+ if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
+
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
@@ -1743,7 +1769,9 @@ static void WipeOutRelocations(Code* code) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- it.rinfo()->WipeOut();
+ if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
+ it.rinfo()->WipeOut();
+ }
}
}
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 9229bad40..294714475 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -579,7 +579,6 @@ class Serializer : public SerializerDeserializer {
// relative addresses for back references.
int fullness_[LAST_SPACE + 1];
SnapshotByteSink* sink_;
- int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h
index 485e93064..b61eaa260 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/simulator.h
@@ -32,6 +32,8 @@
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index a80341bd7..6c03daa75 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -483,7 +483,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
+ chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -560,21 +560,12 @@ bool MemoryChunk::CommitArea(size_t requested) {
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
+ MemoryChunk* other_next = other->next_chunk();
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
- // expanded concurrently
- MemoryBarrier();
-
- // The following two write operations can take effect in arbitrary order
- // since pages are always iterated by the sweeper threads in LIFO order, i.e,
- // the inserted page becomes visible for the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
+ set_next_chunk(other_next);
+ set_prev_chunk(other);
+ other_next->set_prev_chunk(this);
+ other->set_next_chunk(this);
}
@@ -583,10 +574,12 @@ void MemoryChunk::Unlink() {
heap_->decrement_scan_on_scavenge_pages();
ClearFlag(SCAN_ON_SCAVENGE);
}
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
+ MemoryChunk* next_element = next_chunk();
+ MemoryChunk* prev_element = prev_chunk();
+ next_element->set_prev_chunk(prev_element);
+ prev_element->set_next_chunk(next_element);
+ set_prev_chunk(NULL);
+ set_next_chunk(NULL);
}
@@ -2082,20 +2075,21 @@ void FreeListNode::set_next(FreeListNode* next) {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
+ if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
+ ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top_);
+ category->end()->set_next(top());
}
- top_ = category->top();
+ set_top(category->top());
+ NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
}
@@ -2104,15 +2098,16 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
+ set_top(NULL);
+ set_end(NULL);
+ set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode** n = &top_;
+ FreeListNode* t = top();
+ FreeListNode** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2122,8 +2117,9 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
n = (*n)->next_address();
}
}
- if (top_ == NULL) {
- end_ = NULL;
+ set_top(t);
+ if (top() == NULL) {
+ set_end(NULL);
}
available_ -= sum;
return sum;
@@ -2131,17 +2127,17 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode** n = &top_;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) return true;
- n = (*n)->next_address();
+ FreeListNode* node = top();
+ while (node != NULL) {
+ if (Page::FromAddress(node->address()) == p) return true;
+ node = node->next();
}
return false;
}
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
+ FreeListNode* node = top();
if (node == NULL) return NULL;
@@ -2180,8 +2176,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
+ node->set_next(top());
+ set_top(node);
if (end_ == NULL) {
end_ = node;
}
@@ -2190,7 +2186,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
+ FreeListNode* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2299,7 +2295,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
+ FreeListNode* top_node = huge_list_.top();
+ for (FreeListNode** cur = &top_node;
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
@@ -2333,6 +2330,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
+ huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
@@ -2486,7 +2484,7 @@ void FreeList::RepairLists(Heap* heap) {
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
@@ -2502,7 +2500,7 @@ static const int kVeryLongFreeList = 500;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 9d47f81ac..908e72382 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -313,11 +313,21 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const { return next_chunk_; }
- MemoryChunk* prev_chunk() const { return prev_chunk_; }
+ MemoryChunk* next_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
+ }
+
+ MemoryChunk* prev_chunk() const {
+ return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
+ }
- void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+ void set_next_chunk(MemoryChunk* next) {
+ Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
+ }
+
+ void set_prev_chunk(MemoryChunk* prev) {
+ Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
+ }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
@@ -457,16 +467,35 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
- intptr_t parallel_sweeping() const {
- return parallel_sweeping_;
+
+ // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
+ // sweeping must not be performed on that page.
+ // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
+ // page and will not touch the page memory anymore.
+ // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
+ // sweeper thread.
+ // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+ enum ParallelSweepingState {
+ PARALLEL_SWEEPING_DONE,
+ PARALLEL_SWEEPING_FINALIZE,
+ PARALLEL_SWEEPING_IN_PROGRESS,
+ PARALLEL_SWEEPING_PENDING
+ };
+
+ ParallelSweepingState parallel_sweeping() {
+ return static_cast<ParallelSweepingState>(
+ Acquire_Load(&parallel_sweeping_));
}
- void set_parallel_sweeping(intptr_t state) {
- parallel_sweeping_ = state;
+ void set_parallel_sweeping(ParallelSweepingState state) {
+ Release_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
- return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ return Acquire_CompareAndSwap(&parallel_sweeping_,
+ PARALLEL_SWEEPING_PENDING,
+ PARALLEL_SWEEPING_IN_PROGRESS) ==
+ PARALLEL_SWEEPING_PENDING;
}
// Manage live byte count (count of bytes known to be live,
@@ -536,7 +565,7 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
- static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+ static const intptr_t kSizeOffset = 0;
static const intptr_t kLiveBytesOffset =
kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
@@ -550,7 +579,8 @@ class MemoryChunk {
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize +
- 5 * kPointerSize;
+ 5 * kPointerSize +
+ kPointerSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -622,7 +652,7 @@ class MemoryChunk {
inline Heap* heap() { return heap_; }
- static const int kFlagsOffset = kPointerSize * 3;
+ static const int kFlagsOffset = kPointerSize;
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
@@ -671,8 +701,6 @@ class MemoryChunk {
static inline void UpdateHighWaterMark(Address mark);
protected:
- MemoryChunk* next_chunk_;
- MemoryChunk* prev_chunk_;
size_t size_;
intptr_t flags_;
@@ -702,7 +730,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- intptr_t parallel_sweeping_;
+ AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
@@ -719,6 +747,12 @@ class MemoryChunk {
Executability executable,
Space* owner);
+ private:
+ // next_chunk_ holds a pointer of type MemoryChunk
+ AtomicWord next_chunk_;
+ // prev_chunk_ holds a pointer of type MemoryChunk
+ AtomicWord prev_chunk_;
+
friend class MemoryAllocator;
};
@@ -1503,7 +1537,7 @@ class FreeListNode: public HeapObject {
class FreeListCategory {
public:
FreeListCategory() :
- top_(NULL),
+ top_(0),
end_(NULL),
available_(0) {}
@@ -1521,9 +1555,13 @@ class FreeListCategory {
void RepairFreeList(Heap* heap);
- FreeListNode** GetTopAddress() { return &top_; }
- FreeListNode* top() const { return top_; }
- void set_top(FreeListNode* top) { top_ = top; }
+ FreeListNode* top() const {
+ return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
+ }
+
+ void set_top(FreeListNode* top) {
+ NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
+ }
FreeListNode** GetEndAddress() { return &end_; }
FreeListNode* end() const { return end_; }
@@ -1536,7 +1574,7 @@ class FreeListCategory {
Mutex* mutex() { return &mutex_; }
bool IsEmpty() {
- return top_ == NULL;
+ return top() == 0;
}
#ifdef DEBUG
@@ -1545,7 +1583,8 @@ class FreeListCategory {
#endif
private:
- FreeListNode* top_;
+ // top_ points to the top FreeListNode* in the free list category.
+ AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;
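
The MemoryChunk and FreeListCategory hunks above replace plain pointer and integer fields with AtomicWord accessed through Acquire_Load/Release_Store, and turn the sweeping state into an Acquire_CompareAndSwap transition. A rough equivalent using standard C++11 atomics (std::atomic rather than V8's atomicops, so not the code the patch actually uses) would be:

#include <atomic>

enum ParallelSweepingState {
  PARALLEL_SWEEPING_DONE,
  PARALLEL_SWEEPING_FINALIZE,
  PARALLEL_SWEEPING_IN_PROGRESS,
  PARALLEL_SWEEPING_PENDING
};

struct SketchChunk {
  // Links are published with release and read with acquire, so a sweeper
  // thread that observes a new link also observes the chunk contents that
  // were written before the link was published.
  std::atomic<SketchChunk*> next_chunk{nullptr};
  std::atomic<SketchChunk*> prev_chunk{nullptr};
  std::atomic<int> parallel_sweeping{PARALLEL_SWEEPING_DONE};

  SketchChunk* NextChunk() const {
    return next_chunk.load(std::memory_order_acquire);
  }
  void SetNextChunk(SketchChunk* next) {
    next_chunk.store(next, std::memory_order_release);
  }

  // Mirrors MemoryChunk::TryParallelSweeping: only one sweeper thread wins
  // the PENDING -> IN_PROGRESS transition for a given page.
  bool TryParallelSweeping() {
    int expected = PARALLEL_SWEEPING_PENDING;
    return parallel_sweeping.compare_exchange_strong(
        expected, PARALLEL_SWEEPING_IN_PROGRESS, std::memory_order_acquire);
  }
};

The acquire/release pair is what lets concurrent sweeper threads walk the page list while the main thread inserts pages, which is the guarantee the removed hand-written MemoryBarrier() comment in MemoryChunk::InsertAfter was trying to provide.
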
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index e89eb1bfe..a1479b2b9 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -509,10 +509,12 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
// be marked with a free space or filler. Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps. The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything. Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
+// to these maps. Constant pool array objects may contain references to these
+// maps, however, constant pool arrays cannot contain pointers to new space
+// objects, therefore they are skipped. The special garbage section (see
+// comment in spaces.h) is skipped since it can contain absolutely anything.
+// Any objects that are allocated during iteration may or may not be visited by
+// the iteration, but they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
PagedSpace* space,
Page* page,
@@ -526,13 +528,17 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+ Object* constant_pool_array_map = heap_->constant_pool_array_map();
while (visitable_end < end_of_page) {
Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
+ // Skip fillers or constant pool arrays (which never contain new-space
+ // pointers but can contain pointers which can be confused for fillers)
+ // but not things that look like fillers in the special garbage section
+ // which can contain anything.
if (o == free_space_map ||
o == two_pointer_filler_map ||
+ o == constant_pool_array_map ||
(visitable_end == space->top() && visitable_end != space->limit())) {
if (visitable_start != visitable_end) {
// After calling this the special garbage section may have moved.
@@ -549,12 +555,12 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
if (visitable_end == space->top() && visitable_end != space->limit()) {
visitable_start = visitable_end = space->limit();
} else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
+ // At this point we are either at the start of a filler, a
+ // constant pool array, or we are at the point where the space->top()
+ // used to be before the visit_pointer_region call above. Either way we
+ // can skip the object at the current spot: We don't promise to visit
+ // objects allocated during heap traversal, and if space->top() moved
+ // then it must be because an object was allocated at this point.
visitable_start =
visitable_end + HeapObject::FromAddress(visitable_end)->Size();
visitable_end = visitable_start;
@@ -562,6 +568,7 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
} else {
ASSERT(o != free_space_map);
ASSERT(o != two_pointer_filler_map);
+ ASSERT(o != constant_pool_array_map);
ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
visitable_end += kPointerSize;
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 5dfce55fb..ff641dddf 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -116,9 +116,9 @@ Handle<Code> StubCache::FindIC(Handle<Name> name,
Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Map> stub_holder,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL, kind);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -127,11 +127,11 @@ Handle<Code> StubCache::FindHandler(Handle<Name> name,
Handle<Code> StubCache::ComputeMonomorphicIC(
+ Code::Kind kind,
Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state) {
- Code::Kind kind = handler->handler_kind();
InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
Handle<Map> stub_holder;
@@ -179,7 +179,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// therefore the stub will be specific to the name.
Handle<Map> current_map = stub_holder;
Handle<Name> cache_name = current_map->is_dictionary_map()
- ? name : Handle<Name>::cast(isolate()->factory()->empty_string());
+ ? name : Handle<Name>::cast(isolate()->factory()->nonexistent_symbol());
Handle<Object> next(current_map->prototype(), isolate());
Handle<JSObject> last = Handle<JSObject>::null();
while (!next->IsNull()) {
@@ -192,8 +192,10 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Handle<Code> handler = FindHandler(
- cache_name, stub_holder, Code::LOAD_IC, flag);
- if (!handler.is_null()) return handler;
+ cache_name, stub_holder, Code::LOAD_IC, flag, Code::FAST);
+ if (!handler.is_null()) {
+ return handler;
+ }
LoadStubCompiler compiler(isolate_, kNoExtraICState, flag);
handler = compiler.CompileLoadNonexistent(type, last, cache_name);
@@ -220,7 +222,7 @@ Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode) {
ExtraICState extra_state =
KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
@@ -331,8 +333,9 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
if (!cached_ic.is_null()) return cached_ic;
}
- Handle<Code> ic = stub.GetCodeCopyFromTemplate(isolate_);
- ic->ReplaceNthObject(1, isolate_->heap()->meta_map(), *receiver_map);
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate_->factory()->meta_map(), receiver_map);
+ Handle<Code> ic = stub.GetCodeCopy(isolate_, pattern);
if (!receiver_map->is_shared()) {
Map::UpdateCodeCache(receiver_map, name, ic);
@@ -369,14 +372,13 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Code> StubCache::ComputePolymorphicIC(
+ Code::Kind kind,
TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_types,
Handle<Name> name,
ExtraICState extra_ic_state) {
-
Handle<Code> handler = handlers->at(0);
- Code::Kind kind = handler->handler_kind();
Code::StubType type = number_of_valid_types == 1 ? handler->type()
: Code::NORMAL;
if (kind == Code::LOAD_IC) {
@@ -395,7 +397,7 @@ Handle<Code> StubCache::ComputePolymorphicIC(
Handle<Code> StubCache::ComputeStoreElementPolymorphic(
MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
ASSERT(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -662,10 +664,14 @@ RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
- JSObject* receiver = JSObject::cast(args[0]);
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
ASSERT(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
- return receiver->GetElementWithInterceptor(receiver, index);
+ Handle<Object> result =
+ JSObject::GetElementWithInterceptor(receiver, receiver, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -690,9 +696,7 @@ Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- ContextualMode mode = LoadIC::GetContextualMode(extra_state);
- LoadIC::GenerateMegamorphic(masm(), mode);
+ LoadIC::GenerateMegamorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
@@ -723,7 +727,7 @@ Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StrictModeFlag strict_mode = StoreIC::GetStrictMode(extra_state);
+ StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
PROFILE(isolate(),
@@ -734,8 +738,7 @@ Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StoreIC::GenerateMegamorphic(masm(), extra_state);
+ StoreIC::GenerateMegamorphic(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
PROFILE(isolate(),
CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
@@ -951,8 +954,10 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
ASSERT(call_optimization.is_simple_api_call());
Handle<JSFunction> callback = call_optimization.constant_function();
CallbackHandlerFrontend(type, receiver(), holder, name, callback);
- GenerateLoadCallback(call_optimization, IC::TypeToMap(*type, isolate()));
-
+ Handle<Map>receiver_map = IC::TypeToMap(*type, isolate());
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver_map,
+ receiver(), scratch1(), false, 0, NULL);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1118,6 +1123,30 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
}
+Handle<Code> StoreStubCompiler::CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value(), &miss);
+
+ // Generate tail call to StoreIC_ArrayLength.
+ GenerateStoreArrayLength();
+
+ // Handle miss case.
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1125,8 +1154,24 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSFunction> setter) {
Handle<HeapType> type = IC::CurrentTypeOf(object, isolate());
HandlerFrontend(type, receiver(), holder, name);
- GenerateStoreViaSetter(masm(), type, setter);
+ GenerateStoreViaSetter(masm(), type, receiver(), setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
+
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ // Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1236,8 +1281,8 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state(), type, kind, cache_holder_);
+ ASSERT_EQ(kNoExtraICState, extra_state());
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -1265,6 +1310,8 @@ void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
cached_stub =
KeyedLoadFastElementStub(is_js_array,
elements_kind).GetCode(isolate());
+ } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
} else {
ASSERT(elements_kind == DICTIONARY_ELEMENTS);
cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index f55c440ea..7a304fe71 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -89,9 +89,11 @@ class StubCache {
Handle<Code> FindHandler(Handle<Name> name,
Handle<Map> map,
Code::Kind kind,
- InlineCacheHolderFlag cache_holder = OWN_MAP);
+ InlineCacheHolderFlag cache_holder,
+ Code::StubType type);
- Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
+ Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
+ Handle<Name> name,
Handle<HeapType> type,
Handle<Code> handler,
ExtraICState extra_ic_state);
@@ -101,7 +103,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
+ StrictMode strict_mode,
KeyedAccessStoreMode store_mode);
// ---
@@ -120,9 +122,10 @@ class StubCache {
Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode);
+ StrictMode strict_mode);
- Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
+ Handle<Code> ComputePolymorphicIC(Code::Kind kind,
+ TypeHandleList* types,
CodeHandleList* handlers,
int number_of_valid_maps,
Handle<Name> name,
@@ -357,12 +360,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss_label);
- static void GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -404,6 +401,15 @@ class StubCompiler BASE_EMBEDDED {
void GenerateBooleanCheck(Register object, Label* miss);
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch,
+ bool is_store,
+ int argc,
+ Register* values);
+
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
@@ -510,11 +516,11 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
void JitEvent(Handle<Name> name, Handle<Code> code);
- virtual Register receiver() = 0;
- virtual Register name() = 0;
- virtual Register scratch1() = 0;
- virtual Register scratch2() = 0;
- virtual Register scratch3() = 0;
+ Register receiver() { return registers_[0]; }
+ Register name() { return registers_[1]; }
+ Register scratch1() { return registers_[2]; }
+ Register scratch2() { return registers_[3]; }
+ Register scratch3() { return registers_[4]; }
void InitializeRegisters();
@@ -571,6 +577,11 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Register receiver,
Handle<JSFunction> getter);
+ static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+ GenerateLoadViaGetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
Handle<Code> CompileLoadNonexistent(Handle<HeapType> type,
Handle<JSObject> last,
Handle<Name> name);
@@ -581,8 +592,6 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
bool is_dont_delete);
- static Register* registers();
-
protected:
ContextualMode contextual_mode() {
return LoadIC::GetContextualMode(extra_state());
@@ -624,12 +633,10 @@ class LoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
LookupResult* lookup);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- virtual Register scratch1() { return registers_[2]; }
- virtual Register scratch2() { return registers_[3]; }
- virtual Register scratch3() { return registers_[4]; }
+ private:
+ static Register* registers();
Register scratch4() { return registers_[5]; }
+ friend class BaseLoadStoreStubCompiler;
};
@@ -672,6 +679,12 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
LookupResult* lookup,
Handle<Name> name);
+ Handle<Code> CompileStoreArrayLength(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name);
+
+ void GenerateStoreArrayLength();
+
void GenerateNegativeHolderLookup(MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -714,8 +727,14 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter);
+ static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+ GenerateStoreViaSetter(
+ masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
+ }
+
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -745,17 +764,9 @@ class StoreStubCompiler: public BaseLoadStoreStubCompiler {
Label* label,
Handle<Name> name);
- virtual Register receiver() { return registers_[0]; }
- virtual Register name() { return registers_[1]; }
- Register value() { return registers_[2]; }
- virtual Register scratch1() { return registers_[3]; }
- virtual Register scratch2() { return registers_[4]; }
- virtual Register scratch3() { return registers_[5]; }
-
- protected:
- static Register* registers();
-
private:
+ static Register* registers();
+ static Register value();
friend class BaseLoadStoreStubCompiler;
};
@@ -783,9 +794,7 @@ class KeyedStoreStubCompiler: public StoreStubCompiler {
return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
}
- Register transition_map() {
- return registers()[3];
- }
+ Register transition_map() { return scratch1(); }
friend class BaseLoadStoreStubCompiler;
};
diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc
index 097b594a7..7e8305abe 100644
--- a/deps/v8/src/sweeper-thread.cc
+++ b/deps/v8/src/sweeper-thread.cc
@@ -45,6 +45,7 @@ SweeperThread::SweeperThread(Isolate* isolate)
start_sweeping_semaphore_(0),
end_sweeping_semaphore_(0),
stop_semaphore_(0) {
+ ASSERT(!FLAG_job_based_sweeping);
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
}
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index be308d947..e7ea5a68d 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -36,39 +36,60 @@ var $Symbol = global.Symbol;
// -------------------------------------------------------------------
function SymbolConstructor(x) {
- var value =
- IS_SYMBOL(x) ? x : %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
+ throw MakeTypeError('not_constructor', ["Symbol"]);
}
+ // NOTE: Passing in a Symbol value will throw on ToString().
+ return %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
}
-function SymbolGetName() {
- var symbol = IS_SYMBOL_WRAPPER(this) ? %_ValueOf(this) : this;
- if (!IS_SYMBOL(symbol)) {
+
+function SymbolToString() {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.name", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.toString", this]);
}
- return %SymbolName(symbol);
+ var description = %SymbolDescription(%_ValueOf(this));
+ return "Symbol(" + (IS_UNDEFINED(description) ? "" : description) + ")";
}
-function SymbolToString() {
- throw MakeTypeError('symbol_to_string');
-}
function SymbolValueOf() {
- // NOTE: Both Symbol objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_SYMBOL(this) && !IS_SYMBOL_WRAPPER(this)) {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
throw MakeTypeError(
- 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
+ 'incompatible_method_receiver', ["Symbol.prototype.valueOf", this]);
}
return %_ValueOf(this);
}
+function InternalSymbol(key) {
+ var internal_registry = %SymbolRegistry().for_intern;
+ if (IS_UNDEFINED(internal_registry[key])) {
+ internal_registry[key] = %CreateSymbol(key);
+ }
+ return internal_registry[key];
+}
+
+
+function SymbolFor(key) {
+ key = TO_STRING_INLINE(key);
+ var registry = %SymbolRegistry();
+ if (IS_UNDEFINED(registry.for[key])) {
+ var symbol = %CreateSymbol(key);
+ registry.for[key] = symbol;
+ registry.keyFor[symbol] = key;
+ }
+ return registry.for[key];
+}
+
+
+function SymbolKeyFor(symbol) {
+ if (!IS_SYMBOL(symbol)) throw MakeTypeError("not_a_symbol", [symbol]);
+ return %SymbolRegistry().keyFor[symbol];
+}
+
+
// ES6 19.1.2.8
function ObjectGetOwnPropertySymbols(obj) {
if (!IS_SPEC_OBJECT(obj)) {
@@ -84,14 +105,38 @@ function ObjectGetOwnPropertySymbols(obj) {
//-------------------------------------------------------------------
+var symbolCreate = InternalSymbol("Symbol.create");
+var symbolHasInstance = InternalSymbol("Symbol.hasInstance");
+var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable");
+var symbolIsRegExp = InternalSymbol("Symbol.isRegExp");
+var symbolIterator = InternalSymbol("Symbol.iterator");
+var symbolToStringTag = InternalSymbol("Symbol.toStringTag");
+var symbolUnscopables = InternalSymbol("Symbol.unscopables");
+
+
+//-------------------------------------------------------------------
+
function SetUpSymbol() {
%CheckIsBootstrapping();
%SetCode($Symbol, SymbolConstructor);
- %FunctionSetPrototype($Symbol, new $Symbol());
- %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
+ %FunctionSetPrototype($Symbol, new $Object());
+
+ InstallConstants($Symbol, $Array(
+ "create", symbolCreate,
+ "hasInstance", symbolHasInstance,
+ "isConcatSpreadable", symbolIsConcatSpreadable,
+ "isRegExp", symbolIsRegExp,
+ "iterator", symbolIterator,
+ "toStringTag", symbolToStringTag,
+ "unscopables", symbolUnscopables
+ ));
+ InstallFunctions($Symbol, DONT_ENUM, $Array(
+ "for", SymbolFor,
+ "keyFor", SymbolKeyFor
+ ));
- InstallGetter($Symbol.prototype, "name", SymbolGetName);
+ %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
"toString", SymbolToString,
"valueOf", SymbolValueOf
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 39bcc2407..8efaa477b 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -73,7 +73,7 @@ namespace internal {
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_HARMONY, "=init_const_harmony", 2) /* AST-use only. */ \
+ T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 789511713..dc1620a07 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -28,7 +28,6 @@
#ifndef V8_TRANSITIONS_INL_H_
#define V8_TRANSITIONS_INL_H_
-#include "objects-inl.h"
#include "transitions.h"
namespace v8 {
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 2ca04b88f..99b1b3d89 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -47,6 +47,12 @@ TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Zone* zone)
: native_context_(native_context),
zone_(zone) {
+ Object* raw_info = code->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ feedback_vector_ = Handle<FixedArray>(TypeFeedbackInfo::cast(raw_info)->
+ feedback_vector());
+ }
+
BuildDictionary(code);
ASSERT(dictionary_->IsDictionary());
}
@@ -72,6 +78,17 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
}
+Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
+ ASSERT(slot >= 0 && slot < feedback_vector_->length());
+ Object* obj = feedback_vector_->get(slot);
+ if (!obj->IsJSFunction() ||
+ !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
+ return Handle<Object>(obj, isolate());
+ }
+ return Handle<Object>::cast(isolate()->factory()->undefined_value());
+}
+
+
bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
Handle<Object> maybe_code = GetInfo(id);
if (maybe_code->IsCode()) {
@@ -101,22 +118,26 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
- return value->IsAllocationSite() || value->IsJSFunction();
+bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
+ Handle<Object> value = GetInfo(slot);
+ return FLAG_pretenuring_call_new
+ ? value->IsJSFunction()
+ : value->IsAllocationSite() || value->IsJSFunction();
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- return info->IsAllocationSite() || info->IsJSFunction();
+bool TypeFeedbackOracle::CallNewIsMonomorphic(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ return FLAG_pretenuring_call_new
+ ? info->IsJSFunction()
+ : info->IsAllocationSite() || info->IsJSFunction();
}
-byte TypeFeedbackOracle::ForInType(TypeFeedbackId id) {
- Handle<Object> value = GetInfo(id);
+byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
+ Handle<Object> value = GetInfo(feedback_vector_slot);
return value->IsSmi() &&
- Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
+ Smi::cast(*value)->value() == TypeFeedbackInfo::kForInFastCaseMarker
? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
@@ -134,30 +155,31 @@ KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate()->global_context()->array_function());
- } else {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
+
+ ASSERT(info->IsAllocationSite());
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
- return Handle<JSFunction>(isolate()->global_context()->array_function());
- } else {
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
+
+ ASSERT(info->IsAllocationSite());
+ return Handle<JSFunction>(isolate()->native_context()->array_function());
}
-Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
- TypeFeedbackId id) {
- Handle<Object> info = GetInfo(id);
- if (info->IsAllocationSite()) {
+Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(int slot) {
+ Handle<Object> info = GetInfo(slot);
+ if (FLAG_pretenuring_call_new || info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
}
return Handle<AllocationSite>::null();
@@ -206,7 +228,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
CompareIC::StubInfoToType(
stub_minor_key, left_type, right_type, combined_type, map, zone());
} else if (code->is_compare_nil_ic_stub()) {
- CompareNilICStub stub(code->extended_extra_ic_state());
+ CompareNilICStub stub(code->extra_ic_state());
*combined_type = stub.GetType(zone(), map);
*left_type = *right_type = stub.GetInputType(zone(), map);
}
@@ -233,7 +255,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
+ BinaryOpIC::State state(code->extra_ic_state());
ASSERT_EQ(op, state.op());
*left = state.GetLeftType(zone());
@@ -255,7 +277,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
if (!object->IsCode()) return Type::None(zone());
Handle<Code> code = Handle<Code>::cast(object);
ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpIC::State state(code->extended_extra_ic_state());
+ BinaryOpIC::State state(code->extra_ic_state());
return state.GetLeftType(zone());
}
@@ -267,9 +289,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
*is_prototype = LoadIsStub(id, &proto_stub);
if (!*is_prototype) {
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
}
@@ -290,9 +310,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
void TypeFeedbackOracle::AssignmentReceiverTypes(
TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
receiver_types->Clear();
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, kNoExtraICState,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
}
@@ -409,7 +427,6 @@ void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
- ProcessTypeFeedbackCells(code);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
@@ -427,24 +444,21 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
AllowHeapAllocation allocation_allowed;
- int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
- ? TypeFeedbackInfo::cast(code->type_feedback_info())->
- type_feedback_cells()->CellCount()
- : 0;
- int length = infos->length() + cell_count;
- byte* old_start = code->instruction_start();
- dictionary_ = isolate()->factory()->NewUnseededNumberDictionary(length);
- byte* new_start = code->instruction_start();
- RelocateRelocInfos(infos, old_start, new_start);
+ Code* old_code = *code;
+ dictionary_ =
+ isolate()->factory()->NewUnseededNumberDictionary(infos->length());
+ RelocateRelocInfos(infos, old_code, *code);
}
void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start) {
+ Code* old_code,
+ Code* new_code) {
for (int i = 0; i < infos->length(); i++) {
RelocInfo* info = &(*infos)[i];
- info->set_pc(new_start + (info->pc() - old_start));
+ info->set_host(new_code);
+ info->set_pc(new_code->instruction_start() +
+ (info->pc() - old_code->instruction_start()));
}
}
@@ -475,26 +489,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
- Object* raw_info = code->type_feedback_info();
- if (!raw_info->IsTypeFeedbackInfo()) return;
- Handle<TypeFeedbackCells> cache(
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
- for (int i = 0; i < cache->CellCount(); i++) {
- TypeFeedbackId ast_id = cache->AstId(i);
- Cell* cell = cache->GetCell(i);
- Object* value = cell->value();
- if (value->IsSmi() ||
- value->IsAllocationSite() ||
- (value->IsJSFunction() &&
- !CanRetainOtherContext(JSFunction::cast(value),
- *native_context_))) {
- SetInfo(ast_id, cell);
- }
- }
-}
-
-
void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
UnseededNumberDictionary::kNotFound);
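
The oracle now reads call and for-in feedback from a flat feedback vector carried by the code's TypeFeedbackInfo and indexed by slot number, instead of walking per-AST-id type feedback cells (ProcessTypeFeedbackCells is gone). A rough model of the data-structure change, not V8 code:

    // Before: feedback looked up in a dictionary keyed by AST id.
    // After: one flat array with one slot per call / for-in site.
    function FeedbackVector(slotCount) {
      this.slots = new Array(slotCount);
    }
    FeedbackVector.prototype.get = function (slot) {   // GetInfo(int slot) analogue
      return this.slots[slot];
    };
    FeedbackVector.prototype.set = function (slot, value) {
      this.slots[slot] = value;   // e.g. a target function or an allocation site
    };
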
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 8661d5057..5bf653f1c 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -50,14 +50,16 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsUninitialized(TypeFeedbackId id);
bool StoreIsUninitialized(TypeFeedbackId id);
bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
+ bool CallIsMonomorphic(int slot);
bool CallIsMonomorphic(TypeFeedbackId aid);
- bool CallNewIsMonomorphic(TypeFeedbackId id);
+ bool KeyedArrayCallIsHoley(TypeFeedbackId id);
+ bool CallNewIsMonomorphic(int slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
// TODO(rossberg): once all oracle access is removed from ast.cc, it should
// be possible.
- byte ForInType(TypeFeedbackId id);
+ byte ForInType(int feedback_vector_slot);
KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
@@ -84,9 +86,9 @@ class TypeFeedbackOracle: public ZoneObject {
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- Handle<JSFunction> GetCallTarget(TypeFeedbackId id);
- Handle<JSFunction> GetCallNewTarget(TypeFeedbackId id);
- Handle<AllocationSite> GetCallNewAllocationSite(TypeFeedbackId id);
+ Handle<JSFunction> GetCallTarget(int slot);
+ Handle<JSFunction> GetCallNewTarget(int slot);
+ Handle<AllocationSite> GetCallNewAllocationSite(int slot);
bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
@@ -127,19 +129,23 @@ class TypeFeedbackOracle: public ZoneObject {
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
- byte* old_start,
- byte* new_start);
+ Code* old_code,
+ Code* new_code);
void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
- void ProcessTypeFeedbackCells(Handle<Code> code);
// Returns an element from the backing store. Returns undefined if
// there is no information.
Handle<Object> GetInfo(TypeFeedbackId id);
+ // Returns an element from the type feedback vector. Returns undefined
+ // if there is no information.
+ Handle<Object> GetInfo(int slot);
+
private:
Handle<Context> native_context_;
Zone* zone_;
Handle<UnseededNumberDictionary> dictionary_;
+ Handle<FixedArray> feedback_vector_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 4195dd5ea..109d62700 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -66,7 +66,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (offset % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", "NAME", ELEMENT_SIZE);
+ ["start offset", "NAME", ELEMENT_SIZE]);
}
if (offset > bufferByteLength) {
throw MakeRangeError("invalid_typed_array_offset");
@@ -78,7 +78,7 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
if (IS_UNDEFINED(length)) {
if (bufferByteLength % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", "NAME", ELEMENT_SIZE);
+ ["byte length", "NAME", ELEMENT_SIZE]);
}
newByteLength = bufferByteLength - offset;
newLength = newByteLength / ELEMENT_SIZE;
@@ -87,28 +87,32 @@ macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
newByteLength = newLength * ELEMENT_SIZE;
}
if ((offset + newByteLength > bufferByteLength)
- || (newLength > %MaxSmi())) {
+ || (newLength > %_MaxSmi())) {
throw MakeRangeError("invalid_typed_array_length");
}
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
}
function NAMEConstructByLength(obj, length) {
var l = IS_UNDEFINED(length) ?
0 : ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
+ if (l > %_MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
var byteLength = l * ELEMENT_SIZE;
- var buffer = new $ArrayBuffer(byteLength);
- %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
+ var buffer = new $ArrayBuffer(byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
+ }
}
function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
var l = ToPositiveInteger(length, "invalid_typed_array_length");
- if (l > %MaxSmi()) {
+ if (l > %_MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
@@ -257,7 +261,7 @@ function TypedArraySet(obj, offset) {
throw MakeTypeError("typed_array_set_negative_offset");
}
- if (intOffset > %MaxSmi()) {
+ if (intOffset > %_MaxSmi()) {
throw MakeRangeError("typed_array_set_source_too_large");
}
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
@@ -350,7 +354,7 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (length < 0 || offset + length > bufferByteLength) {
throw new MakeRangeError('invalid_data_view_length');
}
- %DataViewInitialize(this, buffer, offset, length);
+ %_DataViewInitialize(this, buffer, offset, length);
} else {
throw MakeTypeError('constructor_not_function', ["DataView"]);
}
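
Besides switching to the inlined %_ runtime entries and keeping small views on-heap (null buffer when the byte length fits %_TypedArrayMaxSizeInHeap), the constructor macro enforces the same user-visible validation as before. A sketch of those checks, assuming Float64Array with ELEMENT_SIZE 8 (illustrative only):

    var buf = new ArrayBuffer(16);

    new Float64Array(buf, 8);          // ok: offset is a multiple of ELEMENT_SIZE
    try {
      new Float64Array(buf, 4);        // misaligned start offset
    } catch (e) {
      console.log(e instanceof RangeError);   // true: invalid_typed_array_alignment
    }
    try {
      new Float64Array(buf, 8, 2);     // 8 + 2 * 8 = 24 bytes > 16-byte buffer
    } catch (e) {
      console.log(e instanceof RangeError);   // true: invalid_typed_array_length
    }
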
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 7867899d7..e269582ca 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -141,29 +141,32 @@ int TypeImpl<Config>::LubBitset() {
}
return bitset;
} else if (this->IsClass()) {
- return LubBitset(*this->AsClass());
+ int bitset = Config::lub_bitset(this);
+ return bitset ? bitset : LubBitset(*this->AsClass());
} else {
- return LubBitset(*this->AsConstant());
+ int bitset = Config::lub_bitset(this);
+ return bitset ? bitset : LubBitset(*this->AsConstant());
}
}
template<class Config>
int TypeImpl<Config>::LubBitset(i::Object* value) {
- if (value->IsSmi()) return kSmi;
+ if (value->IsSmi()) return kSignedSmall & kTaggedInt;
i::Map* map = i::HeapObject::cast(value)->map();
if (map->instance_type() == HEAP_NUMBER_TYPE) {
int32_t i;
uint32_t u;
- if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
- if (value->ToUint32(&u)) return kUnsigned32;
- return kDouble;
+ return kTaggedPtr & (
+ value->ToInt32(&i) ? (Smi::IsValid(i) ? kSignedSmall : kOtherSigned32) :
+ value->ToUint32(&u) ? kUnsigned32 : kFloat);
}
if (map->instance_type() == ODDBALL_TYPE) {
if (value->IsUndefined()) return kUndefined;
if (value->IsNull()) return kNull;
if (value->IsBoolean()) return kBoolean;
if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ if (value->IsUninitialized()) return kNone;
UNREACHABLE();
}
return LubBitset(map);
@@ -201,7 +204,7 @@ int TypeImpl<Config>::LubBitset(i::Map* map) {
case ODDBALL_TYPE:
return kOddball;
case HEAP_NUMBER_TYPE:
- return kDouble;
+ return kFloat & kTaggedPtr;
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
@@ -244,7 +247,7 @@ int TypeImpl<Config>::LubBitset(i::Map* map) {
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
- return kInternal;
+ return kInternal & kTaggedPtr;
default:
UNREACHABLE();
return kNone;
@@ -270,13 +273,12 @@ int TypeImpl<Config>::GlbBitset() {
template<class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::OfCurrently(
i::Handle<i::Object> value, Region* region) {
- if (value->IsSmi()) return Smi(region);
- i::Map* map = i::HeapObject::cast(*value)->map();
- if (map->instance_type() == HEAP_NUMBER_TYPE ||
- map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsSmi() ||
+ i::HeapObject::cast(*value)->map()->instance_type() == HEAP_NUMBER_TYPE ||
+ i::HeapObject::cast(*value)->map()->instance_type() == ODDBALL_TYPE) {
return Of(value, region);
}
- return Class(i::handle(map), region);
+ return Class(i::handle(i::HeapObject::cast(*value)->map()), region);
}
@@ -337,10 +339,10 @@ template<class Config>
bool TypeImpl<Config>::Maybe(TypeImpl* that) {
// Fast path for bitsets.
if (this->IsBitset()) {
- return (this->AsBitset() & that->LubBitset()) != 0;
+ return IsInhabited(this->AsBitset() & that->LubBitset());
}
if (that->IsBitset()) {
- return (this->LubBitset() & that->AsBitset()) != 0;
+ return IsInhabited(this->LubBitset() & that->AsBitset());
}
// (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -547,9 +549,9 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
if (type->IsBitset()) {
return Config::from_bitset(type->AsBitset(), region);
} else if (type->IsClass()) {
- return Config::from_class(type->AsClass(), region);
+ return Config::from_class(type->AsClass(), type->LubBitset(), region);
} else if (type->IsConstant()) {
- return Config::from_constant(type->AsConstant(), region);
+ return Config::from_constant(type->AsConstant(), type->LubBitset(), region);
} else {
ASSERT(type->IsUnion());
typename OtherType::UnionedHandle unioned = type->AsUnion();
@@ -567,7 +569,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
// TODO(rossberg): this does not belong here.
Representation Representation::FromType(Type* type) {
if (type->Is(Type::None())) return Representation::None();
- if (type->Is(Type::Smi())) return Representation::Smi();
+ if (type->Is(Type::SignedSmall())) return Representation::Smi();
if (type->Is(Type::Signed32())) return Representation::Integer32();
if (type->Is(Type::Number())) return Representation::Double();
return Representation::Tagged();
@@ -576,8 +578,8 @@ Representation Representation::FromType(Type* type) {
#ifdef OBJECT_PRINT
template<class Config>
-void TypeImpl<Config>::TypePrint() {
- TypePrint(stdout);
+void TypeImpl<Config>::TypePrint(PrintDimension dim) {
+ TypePrint(stdout, dim);
PrintF(stdout, "\n");
Flush(stdout);
}
@@ -586,9 +588,17 @@ void TypeImpl<Config>::TypePrint() {
template<class Config>
const char* TypeImpl<Config>::bitset_name(int bitset) {
switch (bitset) {
- #define PRINT_COMPOSED_TYPE(type, value) case k##type: return #type;
- BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ case kAny & kRepresentation: return "Any";
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type & kRepresentation: return #type;
+ REPRESENTATION_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
#undef PRINT_COMPOSED_TYPE
+
+ #define PRINT_COMPOSED_TYPE(type, value) \
+ case k##type & kSemantic: return #type;
+ SEMANTIC_BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+
default:
return NULL;
}
@@ -596,23 +606,54 @@ const char* TypeImpl<Config>::bitset_name(int bitset) {
template<class Config>
-void TypeImpl<Config>::TypePrint(FILE* out) {
+void TypeImpl<Config>::BitsetTypePrint(FILE* out, int bitset) {
+ const char* name = bitset_name(bitset);
+ if (name != NULL) {
+ PrintF(out, "%s", name);
+ } else {
+ static const int named_bitsets[] = {
+ #define BITSET_CONSTANT(type, value) k##type & kRepresentation,
+ REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+
+ #define BITSET_CONSTANT(type, value) k##type & kSemantic,
+ SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+ #undef BITSET_CONSTANT
+ };
+
+ bool is_first = true;
+ PrintF(out, "(");
+ for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) {
+ int subset = named_bitsets[i];
+ if ((bitset & subset) == subset) {
+ if (!is_first) PrintF(out, " | ");
+ is_first = false;
+ PrintF(out, "%s", bitset_name(subset));
+ bitset -= subset;
+ }
+ }
+ ASSERT(bitset == 0);
+ PrintF(out, ")");
+ }
+}
+
+
+template<class Config>
+void TypeImpl<Config>::TypePrint(FILE* out, PrintDimension dim) {
if (this->IsBitset()) {
int bitset = this->AsBitset();
- const char* name = bitset_name(bitset);
- if (name != NULL) {
- PrintF(out, "%s", name);
- } else {
- bool is_first = true;
- PrintF(out, "(");
- for (int mask = 1; mask != 0; mask = mask << 1) {
- if ((bitset & mask) != 0) {
- if (!is_first) PrintF(out, " | ");
- is_first = false;
- PrintF(out, "%s", bitset_name(mask));
- }
- }
- PrintF(out, ")");
+ switch (dim) {
+ case BOTH_DIMS:
+ BitsetTypePrint(out, bitset & kSemantic);
+ PrintF("/");
+ BitsetTypePrint(out, bitset & kRepresentation);
+ break;
+ case SEMANTIC_DIM:
+ BitsetTypePrint(out, bitset & kSemantic);
+ break;
+ case REPRESENTATION_DIM:
+ BitsetTypePrint(out, bitset & kRepresentation);
+ break;
}
} else if (this->IsConstant()) {
PrintF(out, "Constant(%p : ", static_cast<void*>(*this->AsConstant()));
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 99a809dc1..4569d131b 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -42,7 +42,10 @@ namespace internal {
// can express class types (a.k.a. specific maps) and singleton types (i.e.,
// concrete constants).
//
-// The following equations and inequations hold:
+// Types consist of two dimensions: semantic (value range) and representation.
+// Both are related through subtyping.
+//
+// The following equations and inequations hold for the semantic axis:
//
// None <= T
// T <= Any
@@ -54,13 +57,12 @@ namespace internal {
// UniqueName = InternalizedString \/ Symbol
// InternalizedString < String
//
-// Allocated = Receiver \/ Number \/ Name
-// Detectable = Allocated - Undetectable
-// Undetectable < Object
// Receiver = Object \/ Proxy
// Array < Object
// Function < Object
// RegExp < Object
+// Undetectable < Object
+// Detectable = Receiver \/ Number \/ Name - Undetectable
//
// Class(map) < T iff instance_type(map) < T
// Constant(x) < T iff instance_type(map(x)) < T
@@ -70,65 +72,121 @@ namespace internal {
// TODO(rossberg): the latter is not currently true for proxies, because of fix,
// but will hold once we implement direct proxies.
//
+// For the representation axis, the following holds:
+//
+// None <= R
+// R <= Any
+//
+// UntaggedInt <= UntaggedInt8 \/ UntaggedInt16 \/ UntaggedInt32
+// UntaggedFloat <= UntaggedFloat32 \/ UntaggedFloat64
+// UntaggedNumber <= UntaggedInt \/ UntaggedFloat
+// Untagged <= UntaggedNumber \/ UntaggedPtr
+// Tagged <= TaggedInt \/ TaggedPtr
+//
+// Subtyping relates the two dimensions, for example:
+//
+// Number <= Tagged \/ UntaggedNumber
+// Object <= TaggedPtr \/ UntaggedPtr
+//
+// That holds because the semantic type constructors defined by the API create
+// types that allow for all possible representations, and dually, the ones for
+// representation types initially include all semantic ranges. Representations
+// can then e.g. be narrowed for a given semantic type using intersection:
+//
+// SignedSmall /\ TaggedInt (a 'smi')
+// Number /\ TaggedPtr (a heap number)
+//
// There are two main functions for testing types:
//
// T1->Is(T2) -- tests whether T1 is included in T2 (i.e., T1 <= T2)
// T1->Maybe(T2) -- tests whether T1 and T2 overlap (i.e., T1 /\ T2 =/= 0)
//
// Typically, the former is to be used to select representations (e.g., via
-// T->Is(Integer31())), and the to check whether a specific case needs handling
-// (e.g., via T->Maybe(Number())).
+// T->Is(SignedSmall())), and the latter to check whether a specific case needs
+// handling (e.g., via T->Maybe(Number())).
//
// There is no functionality to discover whether a type is a leaf in the
// lattice. That is intentional. It should always be possible to refine the
// lattice (e.g., splitting up number types further) without invalidating any
// existing assumptions or tests.
-//
// Consequently, do not use pointer equality for type tests, always use Is!
//
// Internally, all 'primitive' types, and their unions, are represented as
-// bitsets via smis. Class is a heap pointer to the respective map. Only
-// Constant's, or unions containing Class'es or Constant's, require allocation.
+// bitsets. Class is a heap pointer to the respective map. Only Constant's, or
+// unions containing Class'es or Constant's, currently require allocation.
// Note that the bitset representation is closed under both Union and Intersect.
//
-// The type representation is heap-allocated, so cannot (currently) be used in
-// a concurrent compilation context.
-
-
-#define BITSET_TYPE_LIST(V) \
- V(None, 0) \
- V(Null, 1 << 0) \
- V(Undefined, 1 << 1) \
- V(Boolean, 1 << 2) \
- V(Smi, 1 << 3) \
- V(OtherSigned32, 1 << 4) \
- V(Unsigned32, 1 << 5) \
- V(Double, 1 << 6) \
- V(Symbol, 1 << 7) \
- V(InternalizedString, 1 << 8) \
- V(OtherString, 1 << 9) \
- V(Undetectable, 1 << 10) \
- V(Array, 1 << 11) \
- V(Function, 1 << 12) \
- V(RegExp, 1 << 13) \
- V(OtherObject, 1 << 14) \
- V(Proxy, 1 << 15) \
- V(Internal, 1 << 16) \
+// There are two type representations, using different allocation:
+//
+// - class Type (zone-allocated, for compiler and concurrent compilation)
+// - class HeapType (heap-allocated, for persistent types)
+//
+// Both provide the same API, and the Convert method can be used to interconvert
+// them. For zone types, no query method touches the heap, only constructors do.
+
+
+#define MASK_BITSET_TYPE_LIST(V) \
+ V(Representation, static_cast<int>(0xff800000)) \
+ V(Semantic, static_cast<int>(0x007fffff))
+
+#define REPRESENTATION(k) ((k) & kRepresentation)
+#define SEMANTIC(k) ((k) & kSemantic)
+
+#define REPRESENTATION_BITSET_TYPE_LIST(V) \
+ V(None, 0) \
+ V(UntaggedInt8, 1 << 23 | kSemantic) \
+ V(UntaggedInt16, 1 << 24 | kSemantic) \
+ V(UntaggedInt32, 1 << 25 | kSemantic) \
+ V(UntaggedFloat32, 1 << 26 | kSemantic) \
+ V(UntaggedFloat64, 1 << 27 | kSemantic) \
+ V(UntaggedPtr, 1 << 28 | kSemantic) \
+ V(TaggedInt, 1 << 29 | kSemantic) \
+ V(TaggedPtr, -1 << 30 | kSemantic) /* MSB has to be sign-extended */ \
\
- V(Oddball, kBoolean | kNull | kUndefined) \
- V(Signed32, kSmi | kOtherSigned32) \
- V(Number, kSigned32 | kUnsigned32 | kDouble) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(NumberOrString, kNumber | kString) \
- V(Object, kUndetectable | kArray | kFunction | \
- kRegExp | kOtherObject) \
- V(Receiver, kObject | kProxy) \
- V(Allocated, kDouble | kName | kReceiver) \
- V(Any, kOddball | kNumber | kAllocated | kInternal) \
- V(NonNumber, kAny - kNumber) \
- V(Detectable, kAllocated - kUndetectable)
+ V(UntaggedInt, kUntaggedInt8 | kUntaggedInt16 | kUntaggedInt32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedInt | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPtr) \
+ V(Tagged, kTaggedInt | kTaggedPtr)
+
+#define SEMANTIC_BITSET_TYPE_LIST(V) \
+ V(Null, 1 << 0 | REPRESENTATION(kTaggedPtr)) \
+ V(Undefined, 1 << 1 | REPRESENTATION(kTaggedPtr)) \
+ V(Boolean, 1 << 2 | REPRESENTATION(kTaggedPtr)) \
+ V(SignedSmall, 1 << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(OtherSigned32, 1 << 4 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Unsigned32, 1 << 5 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Float, 1 << 6 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+ V(Symbol, 1 << 7 | REPRESENTATION(kTaggedPtr)) \
+ V(InternalizedString, 1 << 8 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherString, 1 << 9 | REPRESENTATION(kTaggedPtr)) \
+ V(Undetectable, 1 << 10 | REPRESENTATION(kTaggedPtr)) \
+ V(Array, 1 << 11 | REPRESENTATION(kTaggedPtr)) \
+ V(Function, 1 << 12 | REPRESENTATION(kTaggedPtr)) \
+ V(RegExp, 1 << 13 | REPRESENTATION(kTaggedPtr)) \
+ V(OtherObject, 1 << 14 | REPRESENTATION(kTaggedPtr)) \
+ V(Proxy, 1 << 15 | REPRESENTATION(kTaggedPtr)) \
+ V(Internal, 1 << 16 | REPRESENTATION(kTagged | kUntagged)) \
+ \
+ V(Oddball, kBoolean | kNull | kUndefined) \
+ V(Signed32, kSignedSmall | kOtherSigned32) \
+ V(Number, kSigned32 | kUnsigned32 | kFloat) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(DetectableObject, kArray | kFunction | kRegExp | kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(Detectable, kDetectableReceiver | kNumber | kName) \
+ V(Object, kDetectableObject | kUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(NonNumber, kOddball | kName | kReceiver | kInternal) \
+ V(Any, kNumber | kNonNumber)
+
+#define BITSET_TYPE_LIST(V) \
+ MASK_BITSET_TYPE_LIST(V) \
+ REPRESENTATION_BITSET_TYPE_LIST(V) \
+ SEMANTIC_BITSET_TYPE_LIST(V)
// struct Config {
@@ -147,14 +205,15 @@ namespace internal {
// static Handle<Unioned>::type as_union(Type*);
// static Type* from_bitset(int bitset);
// static Handle<Type>::type from_bitset(int bitset, Region*);
-// static Handle<Type>::type from_class(i::Handle<i::Map>, Region*)
-// static Handle<Type>::type from_constant(i::Handle<i::Object>, Region*);
+// static Handle<Type>::type from_class(i::Handle<Map>, int lub, Region*);
+// static Handle<Type>::type from_constant(i::Handle<Object>, int, Region*);
// static Handle<Type>::type from_union(Handle<Unioned>::type);
// static Handle<Unioned>::type union_create(int size, Region*);
// static void union_shrink(Handle<Unioned>::type, int size);
// static Handle<Type>::type union_get(Handle<Unioned>::type, int);
// static void union_set(Handle<Unioned>::type, int, Handle<Type>::type);
// static int union_length(Handle<Unioned>::type);
+// static int lub_bitset(Type*);
// }
template<class Config>
class TypeImpl : public Config::Base {
@@ -171,10 +230,10 @@ class TypeImpl : public Config::Base {
#undef DEFINE_TYPE_CONSTRUCTOR
static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
- return Config::from_class(map, region);
+ return Config::from_class(map, LubBitset(*map), region);
}
static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
- return Config::from_constant(value, region);
+ return Config::from_constant(value, LubBitset(*value), region);
}
static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
@@ -248,8 +307,9 @@ class TypeImpl : public Config::Base {
typename OtherTypeImpl::TypeHandle type, Region* region);
#ifdef OBJECT_PRINT
- void TypePrint();
- void TypePrint(FILE* out);
+ enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+ void TypePrint(PrintDimension = BOTH_DIMS);
+ void TypePrint(FILE* out, PrintDimension = BOTH_DIMS);
#endif
private:
@@ -286,6 +346,10 @@ class TypeImpl : public Config::Base {
bool SlowIs(TypeImpl* that);
+ static bool IsInhabited(int bitset) {
+ return (bitset & kRepresentation) && (bitset & kSemantic);
+ }
+
int LubBitset(); // least upper bound that's a bitset
int GlbBitset(); // greatest lower bound that's a bitset
@@ -300,6 +364,7 @@ class TypeImpl : public Config::Base {
#ifdef OBJECT_PRINT
static const char* bitset_name(int bitset);
+ static void BitsetTypePrint(FILE* out, int bitset);
#endif
};
@@ -335,7 +400,7 @@ struct ZoneTypeConfig {
}
template<class T>
static void tagged_set(Tagged* tagged, int i, T value) {
- tagged->at(i + 1) = reinterpret_cast<T>(value);
+ tagged->at(i + 1) = reinterpret_cast<void*>(value);
}
static int tagged_length(Tagged* tagged) {
return tagged->length() - 1;
@@ -375,11 +440,11 @@ struct ZoneTypeConfig {
}
static i::Handle<i::Map> as_class(Type* type) {
ASSERT(is_class(type));
- return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 0));
+ return i::Handle<i::Map>(tagged_get<i::Map**>(as_tagged(type), 1));
}
static i::Handle<i::Object> as_constant(Type* type) {
ASSERT(is_constant(type));
- return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 0));
+ return i::Handle<i::Object>(tagged_get<i::Object**>(as_tagged(type), 1));
}
static Unioned* as_union(Type* type) {
ASSERT(is_union(type));
@@ -399,14 +464,16 @@ struct ZoneTypeConfig {
static Type* from_tagged(Tagged* tagged) {
return reinterpret_cast<Type*>(tagged);
}
- static Type* from_class(i::Handle<i::Map> map, Zone* zone) {
- Tagged* tagged = tagged_create(kClassTag, 1, zone);
- tagged_set(tagged, 0, map.location());
+ static Type* from_class(i::Handle<i::Map> map, int lub, Zone* zone) {
+ Tagged* tagged = tagged_create(kClassTag, 2, zone);
+ tagged_set(tagged, 0, lub);
+ tagged_set(tagged, 1, map.location());
return from_tagged(tagged);
}
- static Type* from_constant(i::Handle<i::Object> value, Zone* zone) {
- Tagged* tagged = tagged_create(kConstantTag, 1, zone);
- tagged_set(tagged, 0, value.location());
+ static Type* from_constant(i::Handle<i::Object> value, int lub, Zone* zone) {
+ Tagged* tagged = tagged_create(kConstantTag, 2, zone);
+ tagged_set(tagged, 0, lub);
+ tagged_set(tagged, 1, value.location());
return from_tagged(tagged);
}
static Type* from_union(Unioned* unioned) {
@@ -434,6 +501,10 @@ struct ZoneTypeConfig {
static int union_length(Unioned* unioned) {
return tagged_length(tagged_from_union(unioned));
}
+ static int lub_bitset(Type* type) {
+ ASSERT(is_class(type) || is_constant(type));
+ return static_cast<int>(tagged_get<intptr_t>(as_tagged(type), 0));
+ }
};
@@ -475,11 +546,12 @@ struct HeapTypeConfig {
static i::Handle<Type> from_bitset(int bitset, Isolate* isolate) {
return i::handle(from_bitset(bitset), isolate);
}
- static i::Handle<Type> from_class(i::Handle<i::Map> map, Isolate* isolate) {
+ static i::Handle<Type> from_class(
+ i::Handle<i::Map> map, int lub, Isolate* isolate) {
return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
}
static i::Handle<Type> from_constant(
- i::Handle<i::Object> value, Isolate* isolate) {
+ i::Handle<i::Object> value, int lub, Isolate* isolate) {
i::Handle<Box> box = isolate->factory()->NewBox(value);
return i::Handle<Type>::cast(i::Handle<Object>::cast(box));
}
@@ -506,6 +578,9 @@ struct HeapTypeConfig {
static int union_length(i::Handle<Unioned> unioned) {
return unioned->length();
}
+ static int lub_bitset(Type* type) {
+ return 0; // kNone, which causes recomputation.
+ }
};
typedef TypeImpl<ZoneTypeConfig> Type;
@@ -560,6 +635,10 @@ struct BoundsImpl {
TypeHandle upper = Type::Intersect(b.upper, t, region);
return BoundsImpl(lower, upper);
}
+
+ bool Narrows(BoundsImpl that) {
+ return that.lower->Is(this->lower) && this->upper->Is(that.upper);
+ }
};
typedef BoundsImpl<ZoneTypeConfig> Bounds;
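
The comment block above describes bitsets that now carry two dimensions at once: the top 9 bits encode representation, the low 23 bits encode semantics, and a type is inhabited only if both halves are non-empty. A plain-JS model of that check, with the constants taken from the lists above and everything else illustrative:

    var kRepresentation = 0xff800000 | 0;
    var kSemantic       = 0x007fffff;
    function REPRESENTATION(k) { return k & kRepresentation; }

    var kTaggedInt      = (1 << 29) | kSemantic;
    var kTaggedPtr      = (-1 << 30) | kSemantic;    // MSB is sign-extended
    var kUntaggedNumber = (0x1f << 23) | kSemantic;  // UntaggedInt8..UntaggedFloat64

    var kSignedSmall = (1 << 3) | REPRESENTATION(kTaggedInt | kTaggedPtr | kUntaggedNumber);
    var kFloat       = (1 << 6) | REPRESENTATION(kTaggedInt | kTaggedPtr | kUntaggedNumber);

    function isInhabited(bitset) {
      return (bitset & kRepresentation) !== 0 && (bitset & kSemantic) !== 0;
    }

    console.log(isInhabited(kSignedSmall & kTaggedInt));  // true: "a smi"
    console.log(isInhabited(kTaggedInt & kTaggedPtr));     // false: representations disjoint
    console.log(isInhabited(kSignedSmall & kFloat));       // false: semantics disjoint
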
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index c7bea40ac..2a581e293 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -323,7 +323,7 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
- oracle()->ForInType(stmt->ForInFeedbackId())));
+ oracle()->ForInType(stmt->ForInFeedbackSlot())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
@@ -530,8 +530,9 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
if (!expr->expression()->IsProperty() &&
- oracle()->CallIsMonomorphic(expr->CallFeedbackId())) {
- expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackId()));
+ expr->HasCallFeedbackSlot() &&
+ oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
+ expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
}
ZoneList<Expression*>* args = expr->arguments();
@@ -560,7 +561,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- // We don't know anything about the result type.
+ NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
}
@@ -611,7 +612,7 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -667,7 +668,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* upper = Type::Union(
expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
- Type* lower = Type::Intersect(Type::Smi(zone()), upper, zone());
+ Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -676,7 +677,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Signed32(zone())));
+ NarrowType(expr,
+ Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -684,7 +686,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -697,7 +699,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
Type::String(zone()) :
l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi(zone()) : Type::None(zone());
+ Type::SignedSmall(zone()) : Type::None(zone());
Type* upper =
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
Type::String(zone()) :
@@ -712,7 +714,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index bd3246778..2bef7ab20 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// This file was generated at 2012-03-06 09:55:58.934483
+// This file was generated at 2014-02-07 15:31:16.733174
#include "unicode-inl.h"
#include <stdlib.h>
@@ -710,28 +710,6 @@ bool Letter::Is(uchar c) {
}
-// Space: point.category == 'Zs'
-
-static const uint16_t kSpaceTable0Size = 4;
-static const int32_t kSpaceTable0[4] = {
- 32, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kSpaceTable1Size = 5;
-static const int32_t kSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
-bool Space::Is(uchar c) {
- int chunk_index = c >> 13;
- switch (chunk_index) {
- case 0: return LookupPredicate(kSpaceTable0,
- kSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kSpaceTable1,
- kSpaceTable1Size,
- c);
- default: return false;
- }
-}
-
-
// Number: point.category == 'Nd'
static const uint16_t kNumberTable0Size = 56;
@@ -767,14 +745,14 @@ bool Number::Is(uchar c) {
}
-// WhiteSpace: 'Ws' in point.properties
+// WhiteSpace: point.category == 'Zs'
-static const uint16_t kWhiteSpaceTable0Size = 7;
-static const int32_t kWhiteSpaceTable0[7] = {
- 1073741833, 13, 32, 133, 160, 5760, 6158 }; // NOLINT
-static const uint16_t kWhiteSpaceTable1Size = 7;
-static const int32_t kWhiteSpaceTable1[7] = {
- 1073741824, 10, 1073741864, 41, 47, 95, 4096 }; // NOLINT
+static const uint16_t kWhiteSpaceTable0Size = 4;
+static const int32_t kWhiteSpaceTable0[4] = {
+ 32, 160, 5760, 6158 }; // NOLINT
+static const uint16_t kWhiteSpaceTable1Size = 5;
+static const int32_t kWhiteSpaceTable1[5] = {
+ 1073741824, 10, 47, 95, 4096 }; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -1833,8 +1811,6 @@ int UnicodeData::GetByteCount() {
+ kLetterTable5Size * sizeof(int32_t) // NOLINT
+ kLetterTable6Size * sizeof(int32_t) // NOLINT
+ kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kSpaceTable1Size * sizeof(int32_t) // NOLINT
+ kNumberTable0Size * sizeof(int32_t) // NOLINT
+ kNumberTable5Size * sizeof(int32_t) // NOLINT
+ kNumberTable7Size * sizeof(int32_t) // NOLINT
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index bb5506d38..65a9af58f 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -226,9 +226,6 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
-struct Space {
- static bool Is(uchar c);
-};
struct Number {
static bool Is(uchar c);
};
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index a2f29e433..2f6008c5a 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -142,8 +142,12 @@ class Unique V8_FINAL {
friend class Unique; // For comparing raw_address values.
private:
+ Unique<T>() : raw_address_(NULL) { }
+
Address raw_address_;
Handle<T> handle_;
+
+ friend class SideEffectsTracker;
};
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
index ee1baeb51..1e73ddd3d 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/uri.h
@@ -127,9 +127,11 @@ Handle<String> URIUnescape::UnescapeSlow(
int dest_position = 0;
Handle<String> second_part;
+ ASSERT(unescaped_length <= String::kMaxLength);
if (one_byte) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(unescaped_length);
+ ASSERT(!dest.is_null());
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -142,6 +144,7 @@ Handle<String> URIUnescape::UnescapeSlow(
} else {
Handle<SeqTwoByteString> dest =
isolate->factory()->NewRawTwoByteString(unescaped_length);
+ ASSERT(!dest.is_null());
DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
@@ -263,10 +266,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
// We don't allow strings that are longer than a maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Handle<String>::null();
- }
+ if (escaped_length > String::kMaxLength) break; // Provoke exception.
}
}
@@ -275,6 +275,7 @@ Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(escaped_length);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, dest, Handle<String>());
int dest_position = 0;
{ DisallowHeapAllocation no_allocation;
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 846261520..6838cb069 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -97,18 +97,4 @@ char* SimpleStringBuilder::Finalize() {
}
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
- switch (divisor) {
- case 3: return DivMagicNumberFor3;
- case 5: return DivMagicNumberFor5;
- case 7: return DivMagicNumberFor7;
- case 9: return DivMagicNumberFor9;
- case 11: return DivMagicNumberFor11;
- case 25: return DivMagicNumberFor25;
- case 125: return DivMagicNumberFor125;
- case 625: return DivMagicNumberFor625;
- default: return InvalidDivMagicNumber;
- }
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 2e7c494d6..753822614 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -105,32 +105,6 @@ inline int MostSignificantBit(uint32_t x) {
}
-// Magic numbers for integer division.
-// These are kind of 2's complement reciprocal of the divisors.
-// Details and proofs can be found in:
-// - Hacker's Delight, Henry S. Warren, Jr.
-// - The PowerPC Compiler Writer’s Guide
-// and probably many others.
-// See details in the implementation of the algorithm in
-// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
-struct DivMagicNumbers {
- unsigned M;
- unsigned s;
-};
-
-const DivMagicNumbers InvalidDivMagicNumber= {0, 0};
-const DivMagicNumbers DivMagicNumberFor3 = {0x55555556, 0};
-const DivMagicNumbers DivMagicNumberFor5 = {0x66666667, 1};
-const DivMagicNumbers DivMagicNumberFor7 = {0x92492493, 2};
-const DivMagicNumbers DivMagicNumberFor9 = {0x38e38e39, 1};
-const DivMagicNumbers DivMagicNumberFor11 = {0x2e8ba2e9, 1};
-const DivMagicNumbers DivMagicNumberFor25 = {0x51eb851f, 3};
-const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
-const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
-
-
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
@@ -172,6 +146,17 @@ inline T RoundUp(T x, intptr_t m) {
}
+// Increment a pointer until it has the specified alignment.
+// This works like RoundUp, but it works correctly on pointer types where
+// sizeof(*pointer) might not be 1.
+template<class T>
+T AlignUp(T pointer, size_t alignment) {
+ ASSERT(sizeof(pointer) == sizeof(uintptr_t));
+ uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+ return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
+}
+
+
template <typename T>
int Compare(const T& a, const T& b) {
if (a == b)
@@ -272,6 +257,12 @@ inline int StrLength(const char* string) {
}
+// TODO(svenpanne) Clean up the whole power-of-2 mess.
+inline int32_t WhichPowerOf2Abs(int32_t x) {
+ return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
+}
+
+
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
@@ -1089,6 +1080,66 @@ class EnumSet {
T bits_;
};
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+ return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+ return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int signed_bitextract_64(int msb, int lsb, int x) {
+ // TODO(jbramley): This is broken for big bitfields.
+ return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Check number width.
+inline bool is_intn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(int64_t x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return !(x >> n);
+}
+
+template <class T>
+inline T truncate_to_intn(T x, unsigned n) {
+ ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+ return (x & ((static_cast<T>(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+template <class T> \
+inline bool is_uint##N(T x) { return is_uintn(x, N); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+template <class T> \
+inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
class TypeFeedbackId {
public:
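
The new bit-field helpers are terse and easy to misread; two of them mirrored in JS as worked examples (a sketch only, the real versions are the C++ inlines above):

    function unsignedBitextract32(msb, lsb, x) {       // unsigned_bitextract_32
      return (x >>> lsb) & ((1 << (1 + msb - lsb)) - 1);
    }
    function isIntN(x, n) {                            // is_intn: fits in n signed bits?
      var limit = Math.pow(2, n - 1);
      return -limit <= x && x < limit;
    }

    console.log(unsignedBitextract32(7, 4, 0xab).toString(16));  // "a": bits 7..4 of 0xab
    console.log(isIntN(127, 8));    // true  (is_int8)
    console.log(isIntN(128, 8));    // false: needs 9 bits
    console.log(isIntN(-128, 8));   // true
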
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index b89bb7a69..b49e0eb5f 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -82,6 +82,8 @@ bool V8::Initialize(Deserializer* des) {
#ifdef V8_USE_DEFAULT_PLATFORM
DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
platform->SetThreadPoolSize(isolate->max_available_threads());
+ // We currently only start the threads early, if we know that we'll use them.
+ if (FLAG_job_based_sweeping) platform->EnsureInitialized();
#endif
return isolate->Init(des);
@@ -148,15 +150,16 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool microtask_pending = isolate->microtask_pending();
- if (!has_call_completed_callbacks && !microtask_pending) return;
+ bool run_microtasks = isolate->autorun_microtasks() &&
+ isolate->microtask_pending();
+ if (!has_call_completed_callbacks && !run_microtasks) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (microtask_pending) Execution::RunMicrotasks(isolate);
+ if (run_microtasks) Execution::RunMicrotasks(isolate);
if (has_call_completed_callbacks) {
for (int i = 0; i < call_completed_callbacks_->length(); i++) {
call_completed_callbacks_->at(i)();
@@ -166,15 +169,27 @@ void V8::FireCallCompletedCallback(Isolate* isolate) {
}
+void V8::RunMicrotasks(Isolate* isolate) {
+ if (!isolate->microtask_pending())
+ return;
+
+ HandleScopeImplementer* handle_scope_implementer =
+ isolate->handle_scope_implementer();
+ ASSERT(handle_scope_implementer->CallDepthIsZero());
+
+ // Increase call depth to prevent recursive callbacks.
+ handle_scope_implementer->IncrementCallDepth();
+ Execution::RunMicrotasks(isolate);
+ handle_scope_implementer->DecrementCallDepth();
+}
+
+
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
- if (FLAG_predictable) {
- if (FLAG_random_seed == 0) {
- // Avoid random seeds in predictable mode.
- FLAG_random_seed = 12347;
- }
- FLAG_hash_seed = 0;
+ if (FLAG_predictable && FLAG_random_seed == 0) {
+ // Avoid random seeds in predictable mode.
+ FLAG_random_seed = 12347;
}
if (FLAG_stress_compaction) {
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 8069e8add..d3f5a9c83 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -101,6 +101,8 @@ class V8 : public AllStatic {
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
+ static void RunMicrotasks(Isolate* isolate);
+
static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
return array_buffer_allocator_;
}
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 7d8d1b7e4..e6cd94df2 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -133,6 +133,7 @@ class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
+class Isolate;
class JSReceiver;
class JSArray;
class JSFunction;
@@ -465,11 +466,11 @@ enum VariableMode {
// User declared variables:
VAR, // declared via 'var', and 'function' declarations
- CONST, // declared via 'const' declarations
+ CONST_LEGACY, // declared via legacy 'const' declarations
LET, // declared via 'let' declarations (first lexical)
- CONST_HARMONY, // declared via 'const' declarations in harmony mode
+ CONST, // declared via 'const' declarations
MODULE, // declared via 'module' declaration (last lexical)
@@ -510,7 +511,7 @@ inline bool IsLexicalVariableMode(VariableMode mode) {
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
+ return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY;
}
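The renamed enumerators above also reorder VariableMode so that the ES6-style immutable bindings sit in a contiguous range, which is what the rewritten IsImmutableVariableMode relies on. A reduced sketch covering only the modes that appear in the hunk (the real enum has more members):

    #include <cassert>

    // Reduced ordering from the hunk: CONST and MODULE are adjacent, so a
    // single range test covers them, and legacy 'const' is handled separately.
    enum VariableMode { VAR, CONST_LEGACY, LET, CONST, MODULE };

    inline bool IsImmutableVariableMode(VariableMode mode) {
      return (mode >= CONST && mode <= MODULE) || mode == CONST_LEGACY;
    }

    int main() {
      assert(IsImmutableVariableMode(CONST));
      assert(IsImmutableVariableMode(MODULE));
      assert(IsImmutableVariableMode(CONST_LEGACY));
      assert(!IsImmutableVariableMode(VAR));
      assert(!IsImmutableVariableMode(LET));
      return 0;
    }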
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index df663c025..f183afb96 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -1282,7 +1282,7 @@ function ObjectFreeze(obj) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
var isProxy = %IsJSProxy(obj);
- if (isProxy || %HasNonStrictArgumentsElements(obj) || %IsObserved(obj)) {
+ if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
if (isProxy) {
ProxyFix(obj);
}
@@ -1384,15 +1384,19 @@ function ObjectIs(obj1, obj2) {
}
-// Harmony __proto__ getter.
+// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
- return %GetPrototype(this);
+ return %GetPrototype(ToObject(this));
}
-// Harmony __proto__ setter.
-function ObjectSetProto(obj) {
- return %SetPrototype(this, obj);
+// ECMA-262, Edition 6, section B.2.2.1.2
+function ObjectSetProto(proto) {
+ CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
+
+ if ((IS_SPEC_OBJECT(proto) || IS_NULL(proto)) && IS_SPEC_OBJECT(this)) {
+ %SetPrototype(this, proto);
+ }
}
@@ -1889,10 +1893,30 @@ SetUpFunction();
 // Eventually, we should move to a real event queue that allows us to maintain
 // the relative ordering of different kinds of tasks.
-RunMicrotasks.runners = new InternalArray;
+function GetMicrotaskQueue() {
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue)) {
+ microtaskState.queue = new InternalArray;
+ }
+ return microtaskState.queue;
+}
function RunMicrotasks() {
while (%SetMicrotaskPending(false)) {
- for (var i in RunMicrotasks.runners) RunMicrotasks.runners[i]();
+ var microtaskState = %GetMicrotaskState();
+ if (IS_UNDEFINED(microtaskState.queue))
+ return;
+
+ var microtasks = microtaskState.queue;
+ microtaskState.queue = new InternalArray;
+
+ for (var i = 0; i < microtasks.length; i++) {
+ microtasks[i]();
+ }
}
}
+
+function EnqueueExternalMicrotask(fn) {
+ GetMicrotaskQueue().push(fn);
+ %SetMicrotaskPending(true);
+}
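The new RunMicrotasks above swaps the per-isolate queue for a fresh one before draining it, so tasks enqueued by a running microtask land in the new queue and are picked up by the next pass of the outer while loop, which keeps going as long as the pending flag had been set again. The same swap-then-drain pattern, sketched in C++ with plain globals standing in for the %GetMicrotaskState / %SetMicrotaskPending runtime calls:

    #include <functional>
    #include <utility>
    #include <vector>

    using Task = std::function<void()>;

    std::vector<Task> g_queue;   // stands in for microtaskState.queue
    bool g_pending = false;      // stands in for the microtask-pending flag

    void Enqueue(Task task) {    // stands in for EnqueueExternalMicrotask
      g_queue.push_back(std::move(task));
      g_pending = true;
    }

    void RunMicrotasks() {
      // Like "while (%SetMicrotaskPending(false))": clear the flag and keep
      // looping as long as it was set again during the previous pass.
      while (std::exchange(g_pending, false)) {
        std::vector<Task> tasks;
        tasks.swap(g_queue);     // swap in a fresh queue before running
        for (Task& task : tasks) task();
      }
    }

    int main() {
      Enqueue([] { Enqueue([] {}); });  // a microtask that schedules another
      RunMicrotasks();                  // drains both, in two passes
      return (g_queue.empty() && !g_pending) ? 0 : 1;
    }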
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 488da42ce..6c4ea527c 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -40,9 +40,9 @@ namespace internal {
const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
- case CONST: return "CONST";
+ case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
- case CONST_HARMONY: return "CONST_HARMONY";
+ case CONST: return "CONST";
case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 39451d5df..401d04446 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -168,7 +168,7 @@ class Variable: public ZoneObject {
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
- // non-strict 'eval' calls between the reference scope (inclusive) and the
+ // sloppy 'eval' calls between the reference scope (inclusive) and the
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 9ba044d5a..904b067a6 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 24
-#define BUILD_NUMBER 35
-#define PATCH_LEVEL 22
+#define MINOR_VERSION 25
+#define BUILD_NUMBER 30
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index 658773e6d..5bee438b6 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -85,8 +85,7 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
callback_(callback),
previous_scope_(isolate->external_callback_scope()) {
#ifdef USE_SIMULATOR
- int32_t sp = Simulator::current(isolate)->get_register(Simulator::sp);
- scope_address_ = reinterpret_cast<Address>(static_cast<intptr_t>(sp));
+ scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
}
diff --git a/deps/v8/src/weak_collection.js b/deps/v8/src/weak_collection.js
new file mode 100644
index 000000000..81d4ab536
--- /dev/null
+++ b/deps/v8/src/weak_collection.js
@@ -0,0 +1,206 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $WeakMap = global.WeakMap;
+var $WeakSet = global.WeakSet;
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
+function WeakMapConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakMap']);
+ }
+}
+
+
+function WeakMapGet(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.get', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.set', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionHas(this, key);
+}
+
+
+function WeakMapDelete(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+ }
+ return %WeakCollectionDelete(this, key);
+}
+
+
+function WeakMapClear() {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakMap() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakMap, WeakMapConstructor);
+ %FunctionSetPrototype($WeakMap, new $Object());
+ %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakMap prototype object.
+ InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+ "get", WeakMapGet,
+ "set", WeakMapSet,
+ "has", WeakMapHas,
+ "delete", WeakMapDelete,
+ "clear", WeakMapClear
+ ));
+}
+
+SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor() {
+ if (%_IsConstructCall()) {
+ %WeakCollectionInitialize(this);
+ } else {
+ throw MakeTypeError('constructor_not_function', ['WeakSet']);
+ }
+}
+
+
+function WeakSetAdd(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.add', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.has', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.delete', this]);
+ }
+ if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ throw %MakeTypeError('invalid_weakset_value', [this, value]);
+ }
+ return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+ if (!IS_WEAKSET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakSet.prototype.clear', this]);
+ }
+ // Replace the internal table with a new empty table.
+ %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($WeakSet, WeakSetConstructor);
+ %FunctionSetPrototype($WeakSet, new $Object());
+ %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the WeakSet prototype object.
+ InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+ "add", WeakSetAdd,
+ "has", WeakSetHas,
+ "delete", WeakSetDelete,
+ "clear", WeakSetClear
+ ));
+}
+
+SetUpWeakSet();
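The library code above only validates receivers and key/value types; the weak behaviour itself lives in the %WeakCollection* runtime functions. The contract those IS_SPEC_OBJECT checks protect, namely that keys must have object identity and that an entry must not keep its key alive, can be sketched with std::weak_ptr standing in for the weak reference (ToyWeakMap and JSObject are illustrative names, not V8 types):

    #include <cassert>
    #include <map>
    #include <memory>

    struct JSObject {};   // placeholder for "anything with object identity"

    // A toy weak map: entries hold their key weakly, so the map alone cannot
    // keep a key alive -- the property the type checks above exist to protect.
    class ToyWeakMap {
     public:
      void Set(const std::shared_ptr<JSObject>& key, int value) {
        entries_[key.get()] = Entry{key, value};
      }
      bool Get(const std::shared_ptr<JSObject>& key, int* out) const {
        auto it = entries_.find(key.get());
        if (it == entries_.end() || it->second.key.expired()) return false;
        *out = it->second.value;
        return true;
      }
     private:
      struct Entry { std::weak_ptr<JSObject> key; int value; };
      std::map<const JSObject*, Entry> entries_;
    };

    int main() {
      ToyWeakMap map;
      auto key = std::make_shared<JSObject>();
      map.Set(key, 42);
      int value = 0;
      assert(map.Get(key, &value) && value == 42);
      key.reset();   // dropping the last strong reference invalidates the entry
      return 0;
    }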
diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h
index 98b0120ea..ba595b97d 100644
--- a/deps/v8/src/win32-headers.h
+++ b/deps/v8/src/win32-headers.h
@@ -94,6 +94,7 @@
#undef NONE
#undef ANY
#undef IGNORE
+#undef STRICT
#undef GetObject
#undef CreateSemaphore
#undef Yield
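The added '#undef STRICT' follows the pattern of the surrounding lines: Windows header setups can leave a macro named STRICT defined, and this release appears to start using STRICT as an ordinary identifier (the sloppy/strict renaming shows up throughout this diff), so the macro has to be cleared after the include. A contrived illustration of the collision; the StrictMode enum here is only a stand-in:

    // Assume a header or the build system has done the equivalent of:
    #define STRICT 1

    // With the macro live, a later declaration such as
    //   enum StrictMode { SLOPPY, STRICT };
    // expands to "{ SLOPPY, 1 }" and fails to compile.  Hence:
    #undef STRICT

    enum StrictMode { SLOPPY, STRICT };   // now compiles as intended

    int main() {
      StrictMode mode = STRICT;
      return mode == STRICT ? 0 : 1;
    }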
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 073fcbe8e..a559b6275 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -205,12 +205,15 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
CPU::FlushICache(pc, sizeof(int32_t));
}
@@ -255,7 +258,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -267,6 +270,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
@@ -278,7 +287,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -369,7 +378,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -408,14 +417,14 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}
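As the x64 hunks above show, this port has no out-of-line constant pool, so the new ConstantPoolArray parameter is simply threaded through and the target address remains a 32-bit displacement stored at pc and measured from the end of that 4-byte field. The arithmetic in isolation, using plain byte buffers instead of V8's Memory accessors (names are stand-ins):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using Address = uint8_t*;

    // Equivalent of x64 Assembler::target_address_at: the int32 stored at pc
    // is a displacement relative to pc + 4.
    Address TargetAddressAt(Address pc) {
      int32_t displacement;
      std::memcpy(&displacement, pc, sizeof(displacement));
      return pc + 4 + displacement;
    }

    // Equivalent of set_target_address_at, minus the instruction-cache flush.
    void SetTargetAddressAt(Address pc, Address target) {
      int32_t displacement = static_cast<int32_t>(target - pc - 4);
      std::memcpy(pc, &displacement, sizeof(displacement));
    }

    int main() {
      uint8_t buffer[64] = {};
      Address pc = buffer + 8;
      Address target = buffer + 40;
      SetTargetAddressAt(pc, target);
      assert(TargetAddressAt(pc) == target);
      return 0;
    }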
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index e7c20bb15..60383da01 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -110,7 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone());
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -750,6 +751,15 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsrl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -934,33 +944,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
@@ -999,84 +993,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::imul(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
@@ -1089,38 +1042,22 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
@@ -1287,17 +1224,9 @@ void Assembler::jmp(const Operand& src) {
}
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
+void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
@@ -1536,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
}
-void Assembler::movzxbq(Register dst, const Operand& src) {
+void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1547,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
+void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1574,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
-void Assembler::movzxwl(Register dst, Register src) {
+void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1598,17 +1513,10 @@ void Assembler::repmovsw() {
}
-void Assembler::repmovsl() {
+void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
+ emit_rex(size);
emit(0xA5);
}
@@ -1621,23 +1529,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1651,30 +1551,22 @@ void Assembler::nop() {
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
void Assembler::Nop(int n) {
 // The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -1752,14 +1644,14 @@ void Assembler::Nop(int n) {
}
-void Assembler::pop(Register dst) {
+void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
-void Assembler::pop(const Operand& dst) {
+void Assembler::popq(const Operand& dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -1773,14 +1665,14 @@ void Assembler::popfq() {
}
-void Assembler::push(Register src) {
+void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
-void Assembler::push(const Operand& src) {
+void Assembler::pushq(const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -1788,7 +1680,7 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Immediate value) {
+void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
emit(0x6A);
@@ -1800,7 +1692,7 @@ void Assembler::push(Immediate value) {
}
-void Assembler::push_imm32(int32_t imm32) {
+void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
@@ -1860,36 +1752,18 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchgq(Register dst, Register src) {
+void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
+ emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::xchgl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_optional_rex_32(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x87);
emit_modrm(src, dst);
}
@@ -1977,21 +1851,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
@@ -1999,10 +1873,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
@@ -2010,69 +1885,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
-void Assembler::testl(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
@@ -2789,6 +2623,16 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
}
+void Assembler::psllq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3172,6 +3016,19 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3185,6 +3042,12 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
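Most of the churn in assembler-x64.cc above is mechanical: pairs of 32-bit ('l') and 64-bit ('q') emitters collapse into one emit_* routine that takes an operand size, and emit_rex picks between a mandatory REX.W prefix and the optional REX prefix based on that size. A reduced sketch of the dispatch shape (printf output instead of real encodings; the helpers here are stand-ins):

    #include <cstdio>

    const int kInt32Size = 4;
    const int kInt64Size = 8;

    // Stand-ins for the two prefix emitters the real helpers choose between.
    void emit_rex_64(int reg)          { std::printf("REX.W prefix for r%d\n", reg); }
    void emit_optional_rex_32(int reg) { if (reg >= 8) std::printf("REX.B prefix for r%d\n", reg); }

    // Shape of the new helpers: one body, the size argument picks the prefix.
    void emit_rex(int reg, int size) {
      if (size == kInt64Size) {
        emit_rex_64(reg);
      } else {
        // The real code ASSERTs size == kInt32Size here.
        emit_optional_rex_32(reg);
      }
    }

    // What used to be separate negq()/negl() bodies, now shared.
    void emit_neg(int reg, int size) {
      emit_rex(reg, size);
      std::printf("F7 /3 (neg) on r%d\n", reg);
    }

    int main() {
      emit_neg(3, kInt64Size);   // roughly what negq(rbx) lowers to
      emit_neg(3, kInt32Size);   // roughly what negl(rbx) lowers to
      return 0;
    }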
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index ef513d1e5..d47ca32e0 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -44,27 +44,6 @@ namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -530,8 +509,27 @@ class CpuFeatures : public AllStatic {
};
-#define ASSEMBLER_INSTRUCTION_LIST(V) \
- V(mov)
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(add) \
+ V(and) \
+ V(cmp) \
+ V(dec) \
+ V(idiv) \
+ V(imul) \
+ V(inc) \
+ V(lea) \
+ V(mov) \
+ V(movzxb) \
+ V(movzxw) \
+ V(neg) \
+ V(not) \
+ V(or) \
+ V(repmovs) \
+ V(sbb) \
+ V(sub) \
+ V(test) \
+ V(xchg) \
+ V(xor)
class Assembler : public AssemblerBase {
@@ -576,8 +574,21 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
+ static inline Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ static inline void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -586,8 +597,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static inline RelocInfo::Mode RelocInfoNone() {
@@ -667,11 +678,24 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kPointerSize); \
+ } \
+ \
+ template<class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template<class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
@@ -685,6 +709,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
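The widened DECLARE_INSTRUCTION macro above is what turns every entry in ASSEMBLER_INSTRUCTION_LIST into a family of 'p'/'l'/'q' mnemonics (now for one, two and three operands) that all forward to a single size-parameterized emit_* helper. A self-contained sketch of the two-operand expansion for 'add', with a fake emitter and free functions instead of Assembler members:

    #include <cstdio>

    const int kInt32Size = 4;
    const int kInt64Size = 8;
    const int kPointerSize = 8;   // on x64 the 'p' and 'q' forms are the same width

    struct Register { int code; };

    // Stand-in for the real emit_add(Register, Register, int size).
    void emit_add(Register dst, Register src, int size) {
      std::printf("add r%d, r%d  (%d-bit)\n", dst.code, src.code, size * 8);
    }

    #define DECLARE_INSTRUCTION(instruction)            \
      template <class P1, class P2>                     \
      void instruction##p(P1 p1, P2 p2) {               \
        emit_##instruction(p1, p2, kPointerSize);       \
      }                                                 \
      template <class P1, class P2>                     \
      void instruction##l(P1 p1, P2 p2) {               \
        emit_##instruction(p1, p2, kInt32Size);         \
      }                                                 \
      template <class P1, class P2>                     \
      void instruction##q(P1 p1, P2 p2) {               \
        emit_##instruction(p1, p2, kInt64Size);         \
      }

    DECLARE_INSTRUCTION(add)   // generates addp, addl and addq
    #undef DECLARE_INSTRUCTION

    int main() {
      Register rax{0}, rbx{3};
      addq(rax, rbx);   // 64-bit form
      addl(rax, rbx);   // 32-bit form
      addp(rax, rbx);   // pointer-sized form
      return 0;
    }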
@@ -701,15 +740,15 @@ class Assembler : public AssemblerBase {
void pushfq();
void popfq();
- void push(Immediate value);
+ void pushq(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
 // 32 bit value; the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
+ void pushq_imm32(int32_t imm32);
+ void pushq(Register src);
+ void pushq(const Operand& src);
- void pop(Register dst);
- void pop(const Operand& dst);
+ void popq(Register dst);
+ void popq(const Operand& dst);
void enter(Immediate size);
void leave();
@@ -741,18 +780,14 @@ class Assembler : public AssemblerBase {
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
// Repeated moves.
void repmovsb();
void repmovsw();
- void repmovsl();
- void repmovsq();
+ void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsl() { emit_repmovs(kInt32Size); }
+ void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -764,59 +799,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src);
- // Exchange two registers
- void xchgq(Register dst, Register src);
- void xchgl(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -859,86 +841,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
@@ -947,80 +853,9 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
@@ -1112,46 +947,6 @@ class Assembler : public AssemblerBase {
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Register src) {
- arithmetic_op_32(0x29, src, dst);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
@@ -1160,61 +955,11 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrl(Register dst, Register src);
// Miscellaneous
void clc();
@@ -1260,9 +1005,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1274,9 +1016,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1407,6 +1146,8 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, byte imm8);
+
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1472,6 +1213,12 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1499,6 +1246,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1605,6 +1359,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ void emit_rex(int size) {
+ if (size == kInt64Size) {
+ emit_rex_64();
+ } else {
+ ASSERT(size == kInt32Size);
+ }
+ }
+
template<class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
@@ -1709,12 +1471,331 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Arithmetics
+ void emit_add(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1, src, dst);
+ }
+ }
+
+ void emit_add(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x21, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x21, src, dst);
+ }
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x39, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x39, src, dst);
+ }
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+ // when size is 32.
+ void emit_idiv(Register src, int size);
+
+ // Signed multiply instructions.
+ // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+ void emit_imul(Register src, int size);
+ void emit_imul(Register dst, Register src, int size);
+ void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Register src, Immediate imm, int size);
+
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_lea(Register dst, const Operand& src, int size);
+
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Register src, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x9, src, dst);
+ } else {
+ arithmetic_op_32(0x9, src, dst);
+ }
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_repmovs(int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1b, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1b, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x29, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x29, src, dst);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
+
+ // Exchange two registers
+ void emit_xchg(Register dst, Register src, int size);
+
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x33, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x31, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x31, src, dst);
+ }
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 6717dd5d6..d5b1a7386 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -61,7 +61,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
__ PopReturnAddressTo(kScratchRegister);
- __ push(rdi);
+ __ Push(rdi);
__ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
@@ -78,13 +78,13 @@ static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
__ CallRuntime(function_id, 1);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
}
@@ -114,7 +114,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -124,25 +124,38 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
// Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
+ ASSERT(!is_api_function || !count_constructions);
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -154,7 +167,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -186,22 +199,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
- __ push(rax);
- __ push(rdi);
+ __ Push(rax);
+ __ Push(rdi);
- __ push(rdi); // constructor
+ __ Push(rdi); // constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
- __ pop(rdi);
- __ pop(rax);
+ __ Pop(rdi);
+ __ Pop(rax);
__ bind(&allocate);
}
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -209,10 +225,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
+ // rdi: start of next object (including memento if create_memento)
__ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
@@ -220,24 +237,39 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
- __ movzxbq(rsi,
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ } else if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ Handle<Map> allocation_memento_map = factory->allocation_memento_map();
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ allocation_memento_map);
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset),
+ rdx);
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -246,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -254,13 +286,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
// Calculate the total properties described by the map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
@@ -296,13 +328,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ movp(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -310,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
@@ -329,17 +361,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
// Must restore rdi (constructor) before calling runtime.
- __ movp(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ movp(rbx, rax); // store result in rbx
+  // If we ended up using the runtime, and we want a memento, then the
+  // runtime call made it for us, and we shouldn't do the create-count
+  // increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
__ movp(rax, Operand(rsp, 0));
@@ -348,20 +413,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
@@ -411,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
@@ -420,17 +485,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
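
The create_memento flag threaded through these wrappers causes Generate_JSConstructStubHelper (above) to allocate AllocationMemento::kSize extra bytes and to write a two-word memento, its map plus the AllocationSite, directly behind the new object's fields; the same hunk bumps the site's pretenure create count when the runtime path was not taken. A rough standalone sketch of that layout, with the sizes as illustrative assumptions rather than the real heap constants:

#include <cstdio>

// Illustrative two-word memento, mirroring AllocationMemento's map and
// allocation-site fields.
struct AllocationMemento {
  void* map;              // factory->allocation_memento_map()
  void* allocation_site;  // site whose pretenure create count is incremented
};

int main() {
  // Assume a 4-slot object for illustration (the stub reads the real size
  // from Map::kInstanceSizeOffset).
  const int kObjectSize = 4 * static_cast<int>(sizeof(void*));
  const int kMementoSize = static_cast<int>(sizeof(AllocationMemento));
  const int kTotal = kObjectSize + kMementoSize;
  std::printf("allocate %d bytes: object [0, %d), memento [%d, %d)\n",
              kTotal, kObjectSize, kObjectSize, kTotal);
  return 0;
}
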
@@ -470,8 +535,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and setup pointer to the arguments.
__ movp(rax, r9);
@@ -497,8 +562,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
+ __ Push(rdi);
+ __ Push(rdx);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
@@ -524,18 +589,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
@@ -565,7 +628,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -574,15 +637,15 @@ static void CallCompileOptimized(MacroAssembler* masm,
bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -607,7 +670,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
+ __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
@@ -643,7 +706,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
@@ -655,10 +718,10 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
@@ -681,12 +744,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ Pop(MemOperand(rsp, 0)); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -710,7 +773,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -719,13 +782,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -762,12 +825,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
@@ -799,7 +862,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
@@ -817,14 +880,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
@@ -866,25 +929,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ popq(rbx); // Discard copy of return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
- __ push(rdi); // re-add proxy object as additional argument
+ __ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -904,7 +967,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -932,8 +995,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -944,17 +1007,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
@@ -963,8 +1026,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
__ movp(rbx, Operand(rbp, kReceiverOffset));
@@ -990,7 +1053,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1005,7 +1068,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
@@ -1017,7 +1080,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
// Copy all arguments from the array to the stack.
Label entry, loop;
@@ -1036,7 +1099,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
// Update the index on the stack and in register rax.
__ movp(rax, Operand(rbp, kIndexOffset));
@@ -1044,7 +1107,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
@@ -1061,8 +1124,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1128,10 +1191,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1150,7 +1210,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
@@ -1158,11 +1218,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ movp(rax, rbx);
@@ -1233,10 +1293,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(rbx, rax);
__ jmp(&argument_is_string);
@@ -1246,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
+ __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
@@ -1256,7 +1316,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
@@ -1264,20 +1324,20 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
}
@@ -1287,12 +1347,12 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Leave the frame.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
}
@@ -1310,9 +1370,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1321,15 +1381,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1340,24 +1400,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
@@ -1389,13 +1449,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -1409,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
@@ -1426,7 +1486,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
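
Most of the mechanical churn in builtins-x64.cc above is the switch from the fixed 64-bit forms (addq, subq, cmpq, lea, push, pop) to the pointer-sized macro-assembler forms (addp, subp, cmpp, leap, Push, Pop), which derive their operand width from kPointerSize. A minimal sketch of the idea, with the emitters reduced to illustrative prints:

#include <cstdio>

const int kPointerSize = static_cast<int>(sizeof(void*));  // 8 on x64

// Illustrative emitters: the q form is always 64-bit, the l form is always
// 32-bit, and the p form follows the pointer size of the target.
void addq() { std::puts("add r64, imm"); }
void addl() { std::puts("add r32, imm"); }
void addp() {
  if (kPointerSize == 8) {
    addq();
  } else {
    addl();
  }
}

int main() {
  addp();  // with 8-byte pointers this emits the 64-bit form
  return 0;
}
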
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 075964bce..c949a423a 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -46,7 +46,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -77,7 +77,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -88,7 +88,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -99,15 +100,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -142,7 +143,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -166,6 +167,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rcx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -213,7 +234,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -241,7 +262,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -470,7 +491,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
rax.is(descriptor->register_params_[param_count - 1]));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ __ Push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
@@ -521,7 +542,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
@@ -541,14 +562,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// is the return register, then save the temp register we use in its stead
// for the result.
Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
- __ push(scratch1);
- __ push(save_reg);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
__ movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ push(rcx);
+ if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
@@ -583,14 +604,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
}
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(rcx));
__ movl(final_result_reg, result_reg);
}
- __ pop(save_reg);
- __ pop(scratch1);
+ __ popq(save_reg);
+ __ popq(scratch1);
__ ret(0);
}
@@ -601,14 +622,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -689,8 +710,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -767,7 +788,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
@@ -794,12 +815,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
@@ -913,99 +934,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ PopReturnAddressTo(scratch);
- __ push(receiver);
- __ push(value);
- __ PushReturnAddressFrom(scratch);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -1026,7 +954,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1041,7 +969,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1056,13 +984,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ PopReturnAddressTo(rbx);
- __ push(rdx);
+ __ Push(rdx);
__ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// rsp[0] : return address
// rsp[8] : number of parameters (tagged)
@@ -1095,14 +1023,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ SmiToInteger64(rcx,
Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
__ movp(rbx, rcx);
@@ -1113,17 +1041,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
// 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+ __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
@@ -1134,10 +1062,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label has_mapped_parameters, copy;
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
__ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
@@ -1174,7 +1102,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
@@ -1184,16 +1112,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
__ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
@@ -1209,11 +1137,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, args.GetArgumentOperand(2));
- __ subq(r8, r9);
+ __ addp(r8, args.GetArgumentOperand(2));
+ __ subp(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movp(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -1251,21 +1179,21 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
__ movp(r9, Operand(rdx, 0));
__ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
- __ addq(r8, Immediate(1));
+ __ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
@@ -1276,11 +1204,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
@@ -1298,12 +1226,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -1331,7 +1259,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
@@ -1339,11 +1267,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -1352,7 +1280,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
@@ -1370,7 +1298,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
@@ -1378,7 +1306,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
+ __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
@@ -1393,9 +1321,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ addp(rdi, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -1404,7 +1332,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -1413,7 +1341,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1441,7 +1369,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -1533,7 +1461,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
@@ -1614,7 +1542,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
__ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
+ __ addp(r9, Operand(kScratchRegister, 0));
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
@@ -1650,24 +1578,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
+ __ addp(rbx, r14);
__ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg_reg_3); // Using arg3 as scratch.
+ __ addp(r14, arg_reg_3); // Using arg3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
@@ -1679,7 +1607,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(arg_reg_1, r15);
// Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
__ LeaveApiExitFrame(true);
@@ -1764,7 +1692,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
@@ -1793,7 +1721,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
__ movp(pending_exception_operand, rdx);
@@ -1807,7 +1735,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1828,7 +1756,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -1890,7 +1818,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register scratch) {
__ JumpIfSmi(object, label);
__ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
+ __ movzxbp(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1910,9 +1838,9 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Compare two smis.
Label non_smi, smi_done;
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movp(rax, rdx);
__ ret(0);
@@ -1926,7 +1854,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
@@ -1966,7 +1894,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
@@ -2044,7 +1972,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
- __ subq(rax, rcx);
+ __ subp(rax, rcx);
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2112,7 +2040,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// a heap object has the low bit clear.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
@@ -2137,8 +2065,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
@@ -2161,92 +2089,118 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : Feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label initialize, done, miss, megamorphic, not_array_function,
+ done_no_smi_convert;
// Load the cache state into rcx.
- __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments register must be smi-tagged to call out.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
- __ push(rdi);
- __ push(rbx);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &not_array_function);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_no_smi_convert);
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+
+ // We won't need rdx or rbx anymore, just save rdi
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ Pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2262,6 +2216,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in rbx we need
+ // to set rbx to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
}
}
@@ -2283,6 +2241,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cont);
}
+
// Load the receiver from the stack.
__ movp(rax, args.GetReceiverOperand());
@@ -2305,15 +2264,18 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ // object (megamorphic symbol) so no write barrier is needed.
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Integer32ToSmi(rdx, rdx);
}
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
__ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
+ __ Push(rdi); // put proxy as additional argument under return address
__ PushReturnAddressFrom(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
@@ -2340,10 +2302,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(rdi);
- __ push(rax);
+ __ Push(rdi);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(args.GetReceiverOperand(), rax);
__ jmp(&cont);
@@ -2353,7 +2315,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2365,6 +2329,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ SmiToInteger32(rdx, rdx);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into rbx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by rdx + 1.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(rbx);
}
// Jump to the function-specific construct stub.
@@ -2372,7 +2356,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
// rdi: called object
@@ -2424,23 +2408,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movp(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -2494,7 +2464,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
+ __ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ movp(rdx, r14); // argc.
__ movp(r8, r15); // argv.
@@ -2529,7 +2499,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ lea(rcx, Operand(rax, 1));
+ __ leap(rcx, Operand(rax, 1));
// Lower 2 bits of rcx are 0 iff rax has failure tag.
__ testl(rcx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned);
@@ -2547,9 +2517,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, Label::kNear);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, masm->isolate());
@@ -2557,9 +2524,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->ExternalOperand(pending_exception_address);
__ movp(rax, pending_exception_operand);
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Clear the pending exception.
pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
@@ -2615,13 +2579,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -2629,7 +2591,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -2639,27 +2600,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ Move(rax, Failure::OutOfMemoryException(0x1), Assembler::RelocInfoNone());
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
@@ -2678,7 +2626,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Push the stack frame type marker twice.
@@ -2687,22 +2635,22 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
__ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __ Push(kScratchRegister); // context slot
+ __ Push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __ pushq(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
@@ -2727,13 +2675,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
+ __ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
@@ -2767,7 +2715,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Store(pending_exception, rax);
// Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ __ Push(Immediate(0)); // receiver
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
@@ -2782,7 +2730,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
__ Load(rax, entry);
}
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
@@ -2790,7 +2738,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
+ __ Pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
@@ -2799,7 +2747,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
+ __ Pop(c_entry_fp_operand);
}
// Restore callee-saved registers (X64 conventions).
@@ -2815,23 +2763,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __ popq(rbx);
#ifdef _WIN64
// Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __ popq(rsi);
+ __ popq(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
}
@@ -2917,7 +2865,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Get return address and delta to inlined map check.
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -2934,9 +2882,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
@@ -2958,7 +2906,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2981,7 +2929,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2996,7 +2944,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
__ PopReturnAddressTo(rcx);
- __ pop(rax);
+ __ Pop(rax);
__ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
@@ -3061,21 +3009,21 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ __ Push(object_);
+ __ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ movp(index_, rax);
}
- __ pop(object_);
+ __ Pop(object_);
// Reload the instance type.
__ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -3090,10 +3038,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_);
__ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -3130,7 +3078,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
- __ push(code_);
+ __ Push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
__ movp(result_, rax);
@@ -3174,11 +3122,11 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy from src to dest using rep movs instruction.
__ movl(kScratchRegister, count);
__ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy.
- __ repmovsq();
+ __ repmovsp();
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
+ __ andp(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
@@ -3190,8 +3138,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
@@ -3293,7 +3241,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
@@ -3339,7 +3287,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
__ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
__ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -3360,7 +3308,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
@@ -3410,7 +3358,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
@@ -3425,11 +3373,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3450,11 +3398,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // esi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3468,7 +3416,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -3610,11 +3558,11 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
- __ lea(left,
+ __ leap(left,
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
+ __ leap(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
// Compare loop.
@@ -3642,7 +3590,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
Counters* counters = masm->isolate()->counters();
@@ -3658,14 +3606,14 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -3763,7 +3711,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
- __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
@@ -3777,7 +3725,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
@@ -3816,14 +3764,14 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+ __ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
+ __ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
@@ -3901,13 +3849,13 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
} else {
Label done;
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
__ movp(rax, rdx);
}
@@ -3965,7 +3913,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
@@ -4013,16 +3961,16 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
// Check that both operands are internalized strings.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4057,15 +4005,15 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
__ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4102,17 +4050,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -4128,7 +4076,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4154,13 +4102,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ PopReturnAddressTo(tmp1);
- __ push(left);
- __ push(right);
+ __ Push(left);
+ __ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4180,7 +4128,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4200,7 +4148,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4215,17 +4163,17 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(rdx);
+ __ Push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
+ __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ Pop(rax);
+ __ Pop(rdx);
}
// Do a tail call to the rewritten stub.
@@ -4252,12 +4200,12 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
@@ -4287,9 +4235,9 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
+ __ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
@@ -4323,26 +4271,26 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
- __ push(name);
+ __ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
- __ push(r0);
+ __ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
@@ -4369,7 +4317,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
__ decl(scratch);
- __ push(scratch);
+ __ Push(scratch);
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -4384,11 +4332,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
__ movp(scratch, Operand(dictionary_,
@@ -4400,7 +4348,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -4511,7 +4459,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4524,13 +4472,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
@@ -4546,18 +4494,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -4571,11 +4511,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental_pop_object;
__ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
+ __ andp(regs_.scratch0(), regs_.object());
__ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
+ __ subp(regs_.scratch1(), Immediate(1));
__ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
@@ -4626,13 +4566,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
- __ push(regs_.object());
+ __ Push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
&need_incremental_pop_object,
Label::kNear);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
@@ -4646,7 +4586,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
__ bind(&need_incremental);
@@ -4687,12 +4627,12 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&slow_elements);
__ PopReturnAddressTo(rdi);
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ Push(rax);
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(rdx);
__ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
@@ -4700,7 +4640,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
__ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
@@ -4744,7 +4684,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
@@ -4761,16 +4701,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2,
+ __ leap(arg_reg_2,
Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
__ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
@@ -4787,8 +4727,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
+ __ popq(arg_reg_2);
+ __ popq(arg_reg_1);
__ Ret();
}
@@ -4850,7 +4790,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -4867,7 +4807,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
if (FLAG_debug_code) {
@@ -4951,7 +4891,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
@@ -4977,15 +4917,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : AllocationSite or undefined
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -4999,31 +4935,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
- __ bind(&okay_here);
+ // We should either have undefined in rbx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Cmp(rbx, undefined_sentinel);
+ // If the feedback vector is the undefined value call an array constructor
+ // that doesn't use AllocationSites.
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &no_info);
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rbx, 0),
- masm->isolate()->factory()->allocation_site_map());
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
__ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -5036,7 +4962,7 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
@@ -5050,7 +4976,7 @@ void InternalArrayConstructorStub::GenerateCase(
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
@@ -5071,7 +4997,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5096,9 +5021,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ andp(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
@@ -5144,7 +5069,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = rsi;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5161,29 +5086,29 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(return_address);
// context save
- __ push(context);
+ __ Push(context);
// load context from callee
__ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
// callee
- __ push(callee);
+ __ Push(callee);
// call data
- __ push(call_data);
+ __ Push(call_data);
Register scratch = call_data;
if (!call_data_undefined) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
- __ push(scratch);
+ __ Push(scratch);
// return value default
- __ push(scratch);
+ __ Push(scratch);
// isolate
__ Move(scratch,
ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
+ __ Push(scratch);
// holder
- __ push(holder);
+ __ Push(holder);
__ movp(scratch, rsp);
// Push return address back on stack.
@@ -5197,7 +5122,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_.
__ movp(StackSpaceOperand(0), scratch);
- __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
__ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
__ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
// FunctionCallbackInfo::is_construct_call_.
@@ -5216,23 +5141,25 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
ASSERT(!api_function_address.is(arguments_arg));
// v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
+ __ leap(arguments_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength,
+ // Accessor for FunctionCallbackInfo and first js arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kContextSaveIndex);
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+ // Stores return the first js argument
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kReturnValueOffset);
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
__ CallApiFunctionAndReturn(
api_function_address,
thunk_address,
callback_arg,
argc + FCA::kArgsLength + 1,
return_value_operand,
- restore_context ? &context_restore_operand : NULL);
+ &context_restore_operand);
}
@@ -5263,17 +5190,17 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ lea(name_arg, Operand(rsp, kPCOnStackSize));
+ __ leap(name_arg, Operand(rsp, kPCOnStackSize));
__ PrepareCallApiFunction(kArgStackSpace);
- __ lea(scratch, Operand(name_arg, 1 * kPointerSize));
+ __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
// v8::PropertyAccessorInfo::args_.
__ movp(StackSpaceOperand(0), scratch);
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
+ __ leap(accessor_info_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index c65307a74..8c8ab691a 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -305,19 +305,19 @@ class RecordWriteStub: public PlatformCodeStub {
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->push(rcx);
+ masm->Push(rcx);
}
- masm->push(scratch1_);
+ masm->Push(scratch1_);
if (!address_.is(address_orig_)) {
- masm->push(address_);
+ masm->Push(address_);
masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
- masm->push(object_);
+ masm->Push(object_);
masm->movp(object_, object_orig_);
}
}
@@ -328,19 +328,19 @@ class RecordWriteStub: public PlatformCodeStub {
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
masm->movp(object_orig_, object_);
- masm->pop(object_);
+ masm->Pop(object_);
}
if (!address_.is(address_orig_)) {
masm->movp(address_orig_, address_);
- masm->pop(address_);
+ masm->Pop(address_);
}
- masm->pop(scratch1_);
+ masm->Pop(scratch1_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->pop(rcx);
+ masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -401,7 +401,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f292f7d25..9b92dc867 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -66,13 +66,13 @@ UnaryMathFunction CreateExpFunction() {
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
+ __ pushq(rax);
+ __ pushq(rbx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
- __ pop(rbx);
- __ pop(rax);
+ __ popq(rbx);
+ __ popq(rax);
__ movsd(xmm0, result);
__ Ret();
@@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
@@ -353,7 +353,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
@@ -381,13 +381,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- __ push(rax);
+ __ Push(rax);
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
@@ -404,7 +404,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
@@ -446,7 +446,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
@@ -458,7 +458,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
@@ -496,7 +496,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
+ __ addp(index, result);
__ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
@@ -606,13 +606,13 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
+ __ leaq(temp1, Operand(temp2, 0x1ff800));
+ __ andq(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
@@ -640,10 +640,10 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
+ patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
initialized = true;
}
return sequence;
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 8ae03deae..36d5df678 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -121,7 +121,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
Register reg = { r };
ASSERT(!reg.is(kScratchRegister));
if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ __ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ PushInt64AsTwoSmis(reg);
@@ -145,7 +145,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(reg, kDebugZapValue);
}
if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Pop(reg);
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
@@ -154,9 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
+ __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame.
}
@@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPCOnStackSize));
+ __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -173,7 +173,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Jump(Operand(kScratchRegister, 0));
}
@@ -261,9 +261,11 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ 0, false);
}
@@ -285,10 +287,12 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
@@ -323,10 +327,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
- __ pop(rdi); // Function.
- __ pop(rbp);
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
@@ -334,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Get function code.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx);
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index aee8be6e1..4bc644def 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -51,6 +51,26 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
  // For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
@@ -63,6 +83,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
@@ -141,7 +167,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
@@ -153,7 +179,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
- __ push(r);
+ __ pushq(r);
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
@@ -170,11 +196,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
- __ subq(arg5, rbp);
- __ neg(arg5);
+ __ subp(arg5, rbp);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
@@ -204,40 +230,40 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
+ __ Pop(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
+ __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
+ __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ Pop(Operand(rdx, 0));
+ __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
- __ push(rax);
+ __ pushq(rax);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
@@ -246,7 +272,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 2);
}
- __ pop(rax);
+ __ popq(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -255,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
@@ -263,14 +289,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
@@ -280,14 +306,14 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- __ push(Operand(rbx, FrameDescription::state_offset()));
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ Push(Operand(rbx, FrameDescription::state_offset()));
+ __ Push(Operand(rbx, FrameDescription::pc_offset()));
+ __ Push(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
+ __ Push(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -299,7 +325,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ASSERT(i > 0);
r = Register::from_code(i - 1);
}
- __ pop(r);
+ __ popq(r);
}
// Set up the roots register.
@@ -317,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ push_imm32(i);
+ __ pushq_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
@@ -335,6 +361,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 2d659cf0e..b870eae85 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -485,9 +485,11 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
NameOfCPURegister(index),
- 1 << scale, disp);
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
@@ -512,38 +514,29 @@ int DisassemblerX64::PrintRightOperandHelper(
int scale, index, base;
get_sib(sib, &scale, &index, &base);
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(base),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return (mod == 2) ? 5 : 2;
}
break;
@@ -1096,6 +1089,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x73) {
+ current += 1;
+ ASSERT(regop == 6);
+ AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1326,6 +1324,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBD) {
+ AppendToBuffer("%s%c ", mnemonic, operand_size_code());
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s,", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1368,6 +1372,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBD:
+ return "bsr";
case 0xBE:
return "movsxb";
case 0xBF:
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 6eb02a917..1fb77ffa6 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -77,6 +77,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 621eacc70..f0b943862 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -101,6 +101,23 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = rsp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(rsp) == (pointers == 0));
+ if (pointers != 0) {
+ __ movq(scratch, rsp);
+ __ subq(scratch, Immediate(pointers * kPointerSize));
+ }
+ __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -118,6 +135,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,10 +152,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
@@ -168,9 +188,28 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, rcx);
+ }
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ movq(rcx, Immediate(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ Push(rdx);
+ }
+ // Continue loop if not done.
+ __ decq(rcx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ Push(rdx);
}
}
}
@@ -183,15 +222,15 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ push(rdi);
+ __ Push(rdi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in rax. It replaces the context passed to us.
@@ -225,28 +264,28 @@ void FullCodeGenerator::Generate() {
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) {
- __ push(rdi);
+ __ Push(rdi);
} else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(rdx,
+ __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
+ __ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -272,7 +311,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -281,11 +320,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_);
}
{ Comment cmnt(masm_, "[ Body");
@@ -360,7 +395,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else {
__ bind(&return_label_);
if (FLAG_trace) {
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
// Pretend that the exit is a backwards jump to the entry.
@@ -375,10 +410,10 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterDecrement(weight);
Label ok;
__ j(positive, &ok, Label::kNear);
- __ push(rax);
+ __ Push(rax);
__ call(isolate()->builtins()->InterruptCheck(),
RelocInfo::CODE_TARGET);
- __ pop(rax);
+ __ Pop(rax);
EmitProfilingCounterReset();
__ bind(&ok);
#ifdef DEBUG
@@ -391,7 +426,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
@@ -429,7 +464,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
+ __ Push(operand);
}
@@ -638,8 +673,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
- __ testq(result_register(), result_register());
+ CallIC(ic, condition->test_id());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -755,7 +790,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -787,7 +822,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
@@ -803,7 +838,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -853,11 +888,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
__ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -924,10 +959,10 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -935,7 +970,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -978,10 +1013,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -991,7 +1026,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1003,7 +1038,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(clause->body_target());
__ bind(&skip);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -1035,6 +1070,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1048,7 +1084,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
@@ -1059,10 +1095,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
- __ push(rax);
+ __ Push(rax);
// Check for proxies.
Label call_runtime;
@@ -1084,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
+ __ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a map from the runtime call, we can do a fast
@@ -1109,28 +1145,29 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
+ __ Push(rax); // Map.
+ __ Push(rcx); // Enumeration cache.
+ __ Push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ // No need for a write barrier, we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1138,17 +1175,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(above, &non_proxy);
__ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
+ __ Push(rbx); // Smi
+ __ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
+ __ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
@@ -1167,7 +1204,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If not, we may have to filter the key.
Label update_each;
__ movp(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
@@ -1178,8 +1215,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
+ __ Push(rcx); // Enumerable.
+ __ Push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
@@ -1207,7 +1244,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
+ __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1238,7 +1275,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
@@ -1287,16 +1324,16 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(info);
__ Push(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(rax);
}
@@ -1317,9 +1354,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1331,7 +1368,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1346,10 +1383,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
@@ -1376,9 +1413,9 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1388,7 +1425,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
@@ -1413,16 +1450,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1439,7 +1475,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
@@ -1452,7 +1488,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1484,7 +1521,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1494,14 +1531,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
           // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1514,15 +1551,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
+ __ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1549,11 +1586,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->pattern());
__ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -1563,10 +1600,10 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -1606,16 +1643,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1643,7 +1679,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(rax); // Save result on the stack
+ __ Push(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1658,14 +1694,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
__ movp(rdx, Operand(rsp, 0));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1676,7 +1712,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
@@ -1698,7 +1734,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
@@ -1708,7 +1744,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
ASSERT(result_saved);
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ CallRuntime(Runtime::kToFastProperties, 1);
}
@@ -1764,11 +1800,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1800,7 +1836,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax); // array literal
+ __ Push(rax); // array literal
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1830,7 +1866,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- __ addq(rsp, Immediate(kPointerSize)); // literal index
+ __ addp(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1839,13 +1875,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1867,7 +1899,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ __ Push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1877,7 +1909,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
__ movp(rdx, Operand(rsp, 0));
- __ push(rax);
+ __ Push(rax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1907,7 +1939,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
+ __ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1961,7 +1993,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::SUSPEND:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
- __ push(result_register());
+ __ Push(result_register());
// Fall through.
case Yield::INITIAL: {
Label suspend, continuation, post_runtime, resume;
@@ -1980,16 +2012,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
- __ push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ __ Pop(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -2026,26 +2058,26 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // exception
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // exception
__ jmp(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(rax); // result
+ __ Pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result
+ __ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
__ movp(rax, Operand(rsp, generator_object_depth));
- __ push(rax); // g
+ __ Push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
@@ -2053,10 +2085,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ pop(rax); // result
+ __ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
@@ -2064,16 +2096,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // received
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // received
// result = receiver[f](arg);
__ bind(&l_call);
__ movp(rdx, Operand(rsp, kPointerSize));
__ movp(rax, Operand(rsp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2084,16 +2116,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// if (!result.done) goto l_try;
__ bind(&l_loop);
- __ push(rax); // save result
+ __ Push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result
+ __ Pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
@@ -2107,12 +2139,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(rbx);
+ __ Pop(rbx);
// Check generator state.
Label wrong_state, closed_state, done;
@@ -2128,7 +2160,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
- __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2138,9 +2170,9 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
@@ -2150,10 +2182,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
// Load the operand stack size.
__ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
@@ -2164,12 +2196,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
@@ -2180,15 +2212,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
- __ push(rbx);
- __ push(result_register());
+ __ Push(rbx);
+ __ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2201,15 +2233,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
- __ push(rax);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(rbx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2227,13 +2259,13 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
- __ pop(rcx);
+ __ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
@@ -2264,7 +2296,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2277,17 +2309,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ pop(rdx);
+ __ Pop(rdx);
__ movp(rcx, rax);
- __ or_(rax, rdx);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movp(rax, rcx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2333,23 +2364,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
- __ pop(rdx);
+ __ Pop(rdx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2370,22 +2395,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ movp(rdx, rax);
- __ pop(rax); // Restore value.
+ __ Pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rdx);
+ __ Pop(rax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2396,44 +2421,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Push(rax); // Value.
+ __ Push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(strict_mode));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
__ movp(rdx, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
Label skip;
- __ movp(rdx, StackOperand(var));
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movp(StackOperand(var), rax);
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2442,20 +2481,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2463,20 +2500,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2492,8 +2516,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2503,14 +2527,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ pop(rcx);
- __ pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2529,7 +2553,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
+ __ Pop(rdx);
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2537,10 +2561,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2559,7 +2581,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2569,7 +2591,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
flags = CALL_AS_METHOD;
}
@@ -2613,7 +2635,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Load the arguments.
@@ -2650,15 +2672,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2670,23 +2692,23 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
+ __ Push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ push(args.GetReceiverOperand());
+ __ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
+ __ Push(Smi::FromInt(strict_mode()));
// Push the start position of the scope the calls resides in.
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2702,8 +2724,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2718,7 +2740,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
@@ -2751,11 +2773,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ push(context_register());
+ __ Push(context_register());
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2764,7 +2786,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
- __ push(rax);
+ __ Push(rax);
// The receiver is implicitly the global receiver. Indicate this by
// passing the hole to the call function stub.
__ PushRoot(Heap::kUndefinedValueRootIndex);
@@ -2830,10 +2852,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -2905,10 +2934,10 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2998,20 +3027,20 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+ __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
+ __ leap(rcx,
Operand(
r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+ __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
@@ -3019,15 +3048,15 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
@@ -3035,12 +3064,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3087,8 +3116,8 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(rax, map, if_false, DO_SMI_CHECK);
__ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- __ j(not_equal, if_false);
+ Immediate(0x1));
+ __ j(no_overflow, if_false);
__ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -3189,8 +3218,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ pop(rbx);
- __ cmpq(rax, rbx);
+ __ Pop(rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3310,7 +3339,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3389,7 +3418,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
@@ -3405,7 +3434,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(rax);
}
@@ -3422,8 +3451,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3455,8 +3484,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3495,7 +3524,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
+ __ Pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -3560,7 +3589,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3607,7 +3636,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3649,7 +3678,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- __ pop(rdx);
+ __ Pop(rdx);
StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(rax);
@@ -3713,7 +3742,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3728,8 +3757,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
- __ pop(rbx);
- __ pop(rcx);
+ __ Pop(rbx);
+ __ Pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3770,7 +3799,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -3783,9 +3812,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ Push(cache);
+ __ Push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(rax);
@@ -3861,7 +3890,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
+ __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3899,7 +3928,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
@@ -3975,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movp(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
@@ -4003,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4038,7 +4067,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
@@ -4047,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4063,16 +4092,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain
// a loop limit.
__ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
__ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movp(separator_operand, string);
@@ -4098,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
@@ -4109,15 +4138,15 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
+ __ addp(rsp, Immediate(3 * kPointerSize));
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4130,7 +4159,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as receiver.
__ movp(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
__ movp(rax, Operand(rsp, 0));
@@ -4138,7 +4167,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Push the arguments ("left-to-right").
@@ -4179,20 +4208,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
+ __ Push(Smi::FromInt(strict_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ push(GlobalObjectOperand());
+ __ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
+ __ Push(Smi::FromInt(SLOPPY));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4203,9 +4230,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ __ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(rax);
}
} else {
@@ -4286,16 +4313,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4320,13 +4342,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
+ __ Push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
+ __ Push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4354,7 +4376,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4389,7 +4411,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4409,9 +4431,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4441,8 +4461,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4454,12 +4474,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rcx);
+ __ Pop(rdx);
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4480,7 +4500,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
__ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
@@ -4489,6 +4509,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,9 +4517,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ push(rsi);
+ __ Push(rsi);
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4621,7 +4642,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4630,16 +4651,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
+ __ Pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4647,11 +4668,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4682,8 +4703,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4730,10 +4751,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4748,29 +4769,29 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
+ __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
// Store result register while executing finally block.
- __ push(result_register());
+ __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Load(rdx, pending_message_script);
- __ push(rdx);
+ __ Push(rdx);
}
@@ -4778,30 +4799,30 @@ void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore pending message from stack.
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Store(pending_message_script, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
// Restore result register from stack.
- __ pop(result_register());
+ __ Pop(result_register());
// Uncook return address.
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ jmp(rdx);
}
@@ -4876,6 +4897,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4893,20 +4915,23 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address,
+ unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index c76eca04d..ea118d076 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
+ __ leap(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
@@ -424,9 +424,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
@@ -442,17 +442,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
@@ -467,8 +467,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -477,8 +477,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rcx, rdi);
__ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -571,8 +571,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ PopReturnAddressTo(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
+ __ Push(rdx); // receiver
+ __ Push(rax); // key
__ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
@@ -734,7 +734,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -852,14 +852,14 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@@ -899,7 +899,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
@@ -909,7 +909,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -934,7 +934,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
__ movp(mapped_location, rax);
- __ lea(r9, mapped_location);
+ __ leap(r9, mapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
__ movp(unmapped_location, rax);
- __ lea(r9, unmapped_location);
+ __ leap(r9, unmapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -973,8 +973,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -982,9 +981,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1024,8 +1021,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1043,8 +1040,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1063,8 +1060,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1082,8 +1079,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1091,8 +1088,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1101,9 +1097,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1121,9 +1115,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // name
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1157,7 +1151,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1165,9 +1159,9 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rcx);
+ __ Push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
__ PushReturnAddressFrom(rbx);
@@ -1178,7 +1172,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1187,9 +1181,9 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ PushReturnAddressFrom(rbx);
@@ -1208,9 +1202,9 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1228,9 +1222,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1248,9 +1242,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 2cb09325f..894a4dd3a 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -87,7 +87,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -154,10 +154,10 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Classic mode functions need to replace the receiver with the global proxy
+ // Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
@@ -187,11 +187,11 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(rax);
+ __ Push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
@@ -200,9 +200,9 @@ bool LCodeGen::GeneratePrologue() {
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
- __ pop(rax);
+ __ Pop(rax);
} else {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
@@ -222,8 +222,8 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in rax. It replaces the context passed to us.
@@ -269,17 +269,36 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+ if (instr->result()->IsRegister()) {
+ Register result_reg = ToRegister(instr->result());
+ __ movsxlq(result_reg, result_reg);
+ } else {
+ // Sign extend the 32bit result in the stack slots.
+ ASSERT(instr->result()->IsStackSlot());
+ Operand src = ToOperand(instr->result());
+ __ movsxlq(kScratchRegister, src);
+ __ movq(src, kScratchRegister);
+ }
+ }
+}
+
+
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -303,15 +322,15 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ bind(&needs_frame);
__ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
- __ push(rsi);
+ __ Push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
+ __ Push(rsi);
__ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
@@ -335,7 +354,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -349,10 +369,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that esi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp); // Caller's frame pointer.
+ __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
@@ -362,7 +382,7 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
}
__ jmp(code->exit());
}
@@ -405,20 +425,18 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmi();
+ chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -577,10 +595,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -725,7 +739,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
- __ push(rax);
+ __ Push(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
@@ -733,13 +747,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
ASSERT(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
}
@@ -798,6 +812,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -985,281 +1007,324 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ testl(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ negl(dividend);
+ __ andl(dividend, Immediate(mask));
+ __ negl(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(rdx));
+ __ bind(&dividend_is_not_negative);
+ __ andl(dividend, Immediate(mask));
+ __ bind(&done);
+}
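For reference, the branching scheme the comment above describes amounts to the following C++ sketch (illustration only, not part of the patch; the unsigned casts stand in for the wrapping negl):

#include <cstdint>

// Remainder by a power-of-two divisor d (d == +/-2^k, d != 0), following the
// branching scheme of DoModByPowerOf2I: the result takes the dividend's sign.
int32_t ModByPowerOf2(int32_t x, int32_t d) {
  // |d| - 1, written so it is also correct when d == INT32_MIN.
  uint32_t mask = d < 0 ? ~static_cast<uint32_t>(d) : static_cast<uint32_t>(d) - 1;
  if (x < 0) {
    // negl / andl / negl above; unsigned arithmetic mirrors the wrapping negl.
    uint32_t r = (0u - static_cast<uint32_t>(x)) & mask;
    return -static_cast<int32_t>(r);
  }
  return static_cast<int32_t>(static_cast<uint32_t>(x) & mask);
}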
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imull(rdx, rdx, Immediate(Abs(divisor)));
+ __ movl(rax, dividend);
+ __ subl(rax, rdx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmpl(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
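The constant-divisor path computes the remainder from a truncating quotient and a multiply-back; a minimal C++ sketch of the identity it relies on (C++ '/' already truncates toward zero, so it stands in for the quotient TruncatingDiv leaves in rdx above):

#include <cstdint>

// Remainder by a nonzero constant divisor, the way DoModByConstI assembles it.
// d == INT32_MIN and the x == INT32_MIN, d == -1 pair are left out of this sketch.
int32_t ModByConst(int32_t x, int32_t d) {
  int32_t abs_d = d < 0 ? -d : d;
  int32_t q = x / abs_d;        // truncating quotient by |d|
  return x - q * abs_d;         // remainder: sign follows the dividend
}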
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(rax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
- __ bind(&done);
+ __ testl(result_reg, result_reg);
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idivl(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sarl(dividend, Immediate(shift));
return;
+ }
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ if (divisor == -1) {
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
}
- return;
+ }
+ __ bind(&not_kmin_int);
+ __ sarl(dividend, Immediate(shift));
+ __ bind(&done);
+}
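A sketch of why the flooring case reduces to a shift (illustration only): an arithmetic right shift already rounds toward minus infinity, so the positive-divisor case is a single sar, and for negative divisors floor(x / -2^k) == floor(-x / 2^k).

#include <cstdint>

// Flooring division by d == +/-2^k. Assumes an arithmetic '>>' on signed
// values; x == INT32_MIN with d < 0 is the overflow case the generated code
// deopts on or folds to a constant.
int32_t FlooringDivByPowerOf2(int32_t x, int32_t d, int k /* log2 of |d| */) {
  if (d > 0) return x >> k;   // sar floors toward minus infinity
  return (-x) >> k;           // floor(x / -2^k) == floor(-x / 2^k)
}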
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - std::floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ Set(reg2, multiplier);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+ Label needs_adjustment, done;
+ __ cmpl(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ decl(rdx);
+ __ bind(&done);
}
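The pre/post adjustment in the general case above rests on a small identity; a hedged C++ sketch (TruncatingDiv is assumed to behave like C++ '/', i.e. truncate toward zero):

#include <cstdint>

// Flooring division by a nonzero constant when the dividend's sign is only
// known at run time. With matching signs the truncating quotient is already
// floored; otherwise floor(x / d) == trunc((x + adj) / d) - 1 with
// adj == (d > 0 ? 1 : -1), which is the needs_adjustment path above.
// x == INT32_MIN with d == -1 is excluded (it deopts).
int32_t FlooringDivByConst(int32_t x, int32_t d) {
  bool same_sign = (d > 0) ? (x >= 0) : (x <= 0);
  if (same_sign) return x / d;
  int32_t adj = (d > 0) ? 1 : -1;
  return (x + adj) / d - 1;
}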
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ testl(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK, the 'if' is an optimization only.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
+}
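A C++ rendering of the bias trick the power-of-two path uses: a bare arithmetic shift would floor, so negative dividends first get |d| - 1 added to make the shift truncate toward zero, mirroring the sarl/shrl/addl/sarl sequence above (sketch only).

#include <cstdint>

// Truncating (round-toward-zero) division by d == +/-2^k using shifts only.
// Assumes an arithmetic '>>'; x == INT32_MIN with d == -1 is the deopt case.
int32_t DivByPowerOf2(int32_t x, int32_t d, int k /* log2 of |d| */) {
  int32_t bias = (x >> 31) & static_cast<int32_t>((1u << k) - 1);  // |d|-1 if x < 0
  int32_t q = (x + bias) >> k;                                     // truncates now
  return d < 0 ? -q : q;
}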
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
- }
- if (divisor < 0) __ negl(dividend);
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negp(rdx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ movl(rax, rdx);
+ __ imull(rax, rax, Immediate(divisor));
+ __ subl(rax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
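TruncatingDiv, used here and in the modulo and flooring paths above, replaces idiv with a multiply by a precomputed reciprocal. A sketch of the d == 3 instance only, with the standard magic constant from Hacker's Delight; the real helper presumably derives the constant and shift from the divisor at compile time.

#include <cstdint>

// Truncating division by 3 without a divide: 0x55555556 == ceil(2^32 / 3).
int32_t TruncatingDivBy3(int32_t x) {
  int64_t product = static_cast<int64_t>(x) * INT64_C(0x55555556);
  int32_t q = static_cast<int32_t>(product >> 32);             // high half of the product
  q += static_cast<int32_t>(static_cast<uint32_t>(x) >> 31);   // +1 if x < 0
  return q;  // == x / 3 (truncated) for all int32_t x
}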
+
- Register left_reg = rax;
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->left());
+ Register divisor = ToRegister(instr->right());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to rdx.
+ // Sign extend to rdx (= remainder).
__ cdq();
- __ idivl(right_reg);
+ __ idivl(divisor);
- if (instr->is_flooring()) {
+ if (hdiv->IsMathFloorOfDiv()) {
Label done;
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
__ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
__ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
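The MathFloorOfDiv fixup after the idivl can be read as: truncation and flooring disagree only when the division was inexact and the true quotient is negative, and in that case the quotient is decremented. A small C++ sketch of the same flag-free adjustment:

#include <cstdint>

// (remainder ^ divisor) >> 31 is -1 exactly when a nonzero remainder and the
// divisor have opposite signs (true quotient negative), else 0; assumes an
// arithmetic '>>', as sarl provides.
int32_t FloorFromTruncating(int32_t quotient, int32_t remainder, int32_t divisor) {
  if (remainder != 0) {
    quotient += (remainder ^ divisor) >> 31;
  }
  return quotient;
}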
@@ -1323,14 +1388,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToOperand(right));
+ __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToRegister(right));
+ __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
@@ -1344,7 +1409,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
@@ -1360,7 +1425,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1368,7 +1433,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
@@ -1408,13 +1473,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ __ andp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ __ orp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ __ xorp(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1424,13 +1489,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ __ andp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ __ orp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ __ xorp(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
@@ -1518,13 +1583,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToRegister(right));
+ __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToOperand(right));
+ __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
@@ -1601,7 +1666,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
@@ -1642,17 +1707,17 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- __ push(string);
+ __ Push(string);
__ movp(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
+ __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(string);
+ __ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
@@ -1706,44 +1771,44 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
- bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+ bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- if (is_q) {
- __ lea(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ if (is_p) {
+ __ leap(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_q) {
- __ lea(ToRegister(instr->result()), address);
+ if (is_p) {
+ __ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
- if (is_q) {
- __ addq(ToRegister(left),
+ if (is_p) {
+ __ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
}
} else if (right->IsRegister()) {
- if (is_q) {
- __ addq(ToRegister(left), ToRegister(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (is_q) {
- __ addq(ToRegister(left), ToOperand(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
@@ -1776,7 +1841,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
@@ -1785,7 +1850,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
@@ -1924,7 +1989,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
@@ -1956,7 +2021,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
@@ -2016,7 +2081,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2139,9 +2204,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cc = ReverseCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
@@ -2164,7 +2229,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
@@ -2182,9 +2247,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
@@ -2210,8 +2275,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
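This hunk, and the similar ones in DoMathFloor and DoMathRound below, swap "cmpl reg, 0x80000000 / branch on equal" for "cmpl reg, 0x1 / branch on overflow": subtracting 1 overflows signed 32-bit arithmetic only when the register holds INT32_MIN (0x80000000), and the one-byte immediate encodes shorter. A sketch of the predicate being tested (the builtin is GCC/Clang, used purely for illustration):

#include <cstdint>

// cmpl reg, 1 sets OF exactly when reg == INT32_MIN: that is the only value
// for which reg - 1 leaves the signed 32-bit range.
bool IsMinInt(int32_t reg) {
  int32_t difference;
  return __builtin_sub_overflow(reg, 1, &difference);  // equivalent to reg == INT32_MIN
}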
@@ -2318,7 +2383,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
@@ -2411,8 +2476,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// actual type and do a signed compare with the width of the type range.
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
@@ -2470,11 +2535,11 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
+ __ Push(ToRegister(instr->left()));
+ __ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2520,7 +2585,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -2557,14 +2622,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(flags);
- __ push(ToRegister(instr->value()));
+ __ Push(ToRegister(instr->value()));
__ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
ASSERT(delta >= 0);
- __ push_imm32(delta);
+ __ PushImm32(delta);
// We are pushing three values on the stack but recording a
// safepoint with two arguments because stub is going to
@@ -2582,7 +2647,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
@@ -2603,7 +2668,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2619,7 +2684,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
- __ push(rax);
+ __ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
@@ -2629,7 +2694,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
@@ -2642,7 +2707,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
__ shl(reg, Immediate(kPointerSizeLog2));
- __ addq(rsp, reg);
+ __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
@@ -2785,6 +2850,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Representation representation = access.representation();
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+#endif
+
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
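The "read int value directly from upper half of the smi" loads in this and the later hunks lean on the x64 smi layout pinned down by the STATIC_ASSERTs: the 32-bit payload sits in the upper half of the 64-bit word and the lower half is all tag (zero). A sketch with hypothetical helper names:

#include <cstdint>

// x64 smi encoding assumed above: kSmiTag == 0 and
// kSmiTagSize + kSmiShiftSize == 32, so value << 32 is the smi word and the
// low 32 bits of a valid smi are always zero.
uint64_t EncodeSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

int32_t LoadSmiUpperHalf(uint64_t smi_word) {
  // Matches a 4-byte load from the upper half of the field.
  return static_cast<int32_t>(smi_word >> 32);
}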
@@ -2861,9 +2932,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(result, args.GetArgumentOperand(const_index));
+ if (const_index >= 0 && const_index < const_length) {
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(result, args.GetArgumentOperand(const_index));
+ } else if (FLAG_debug_code) {
+ __ int3();
+ }
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2883,19 +2958,6 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -2925,7 +2987,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ movzxbq(result, operand);
+ __ movzxbp(result, operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
@@ -2933,7 +2995,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ movzxwq(result, operand);
+ __ movzxwp(result, operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
@@ -2958,7 +3020,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2969,19 +3031,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -3009,20 +3058,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
bool requires_hole_check = hinstr->RequiresHoleCheck();
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
@@ -3030,6 +3065,17 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (representation.IsInteger32() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3108,7 +3154,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+ __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -3139,9 +3185,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
@@ -3221,10 +3267,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
- __ push(receiver);
+ __ Push(receiver);
__ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
@@ -3236,7 +3282,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ push(args.GetArgumentOperand(0));
+ __ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3281,10 +3327,10 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3318,7 +3364,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3349,7 +3395,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
@@ -3383,7 +3429,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
} else {
Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
generator.BeforeCall(__ CallSize(target));
- __ call(target);
+ __ Call(target);
}
generator.AfterCall();
}
@@ -3416,7 +3462,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+ Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
@@ -3446,10 +3492,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
@@ -3509,8 +3555,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3534,8 +3580,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3572,9 +3618,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3589,9 +3635,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3721,17 +3767,31 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
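The x87 sequence above evaluates Math.log via fldln2 followed by fyl2x, which computes ST(1) * log2(ST(0)); with ln 2 on the stack that product is the natural logarithm. The same identity in C++ (sketch only):

#include <cmath>

// ln(x) == ln(2) * log2(x), the identity behind fldln2; fyl2x above.
double LnViaLog2(double x) {
  return std::log(2.0) * std::log2(x);  // == std::log(x) for positive finite x
}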
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsrl(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Set(result, 63); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
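Math.clz32 as emitted above: bsr yields the bit index of the highest set bit, and for indices in [0, 31] xor with 31 equals 31 minus the index, i.e. the leading-zero count. Because bsr leaves its destination undefined for a zero input, the code preloads 63, which the final xor maps to 32. A C++ sketch (the builtin is GCC/Clang, standing in for bsrl):

#include <cstdint>

int32_t Clz32(uint32_t input) {
  int32_t result = 63;                   // placeholder used when input == 0
  if (input != 0) {
    result = 31 - __builtin_clz(input);  // index of the highest set bit (bsr)
  }
  return result ^ 31;                    // 31 - index, or 63 ^ 31 == 32 for zero
}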
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
@@ -3771,8 +3831,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3784,7 +3843,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, factory()->undefined_value());
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3801,7 +3860,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// We might need a change here
// look at the first argument
__ movp(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
@@ -3830,7 +3889,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
@@ -3840,10 +3899,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
+ __ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
+ __ leap(result, Operand(base, offset, times_1, 0));
}
}
@@ -3860,7 +3919,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -3872,19 +3930,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_fields && representation.IsSmi()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsInteger32Constant(operand_value) &&
- !IsSmiConstant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -3892,6 +3947,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -3922,9 +3980,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed = hinstr->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -3934,6 +3989,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (representation.IsSmi() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3986,8 +4046,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4026,7 +4085,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
Register reg2 = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ cmpp(reg, reg2);
} else {
__ cmpl(reg, reg2);
}
@@ -4043,7 +4102,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
} else {
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, ToRegister(instr->index()));
} else {
__ cmpl(length, ToRegister(instr->index()));
}
@@ -4057,19 +4116,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -4122,7 +4168,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4133,20 +4179,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -4174,26 +4206,23 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->value()->representation();
if (representation.IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -4234,7 +4263,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
+ __ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
@@ -4262,7 +4291,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4360,7 +4389,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ push(string);
+ __ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
@@ -4370,10 +4399,10 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
- __ push(index);
+ __ Push(index);
}
CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAt, 2, instr, instr->context());
+ Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4425,7 +4454,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
+ __ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4444,18 +4473,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4467,22 +4484,6 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- __ testl(ToRegister(input), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4518,15 +4519,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
+ Label done, slow;
Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
- XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
+ Register tmp = ToRegister(instr->temp1());
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
- Label done;
// Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
@@ -4540,29 +4537,31 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains
+ // an integer value.
+ __ Set(reg, 0);
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- // NumberTagU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
- if (!reg.is(rax)) __ movp(reg, rax);
+ // NumberTagU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, rax);
+ }
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4605,11 +4604,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ movp(kScratchRegister, rax);
@@ -4619,10 +4618,19 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
+ HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ testl(input, input);
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ Integer32ToSmi(output, input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
@@ -4916,13 +4924,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
- __ push(object);
+ __ Push(object);
__ Set(rsi, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
@@ -5011,7 +5019,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movp(input_reg, Immediate(0));
+ __ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
@@ -5029,6 +5037,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ movq(result_reg, value_reg);
+ __ shr(result_reg, Immediate(32));
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5108,7 +5140,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ Integer32ToSmi(size, size);
- __ push(size);
+ __ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
@@ -5128,14 +5160,14 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -5156,11 +5188,11 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -5170,10 +5202,10 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -5197,16 +5229,16 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5224,9 +5256,9 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
+ __ Push(ToRegister(operand));
} else {
- __ push(ToOperand(operand));
+ __ Push(ToOperand(operand));
}
}
@@ -5365,7 +5397,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5401,7 +5433,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5437,10 +5469,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5481,7 +5510,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
@@ -5499,7 +5528,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
@@ -5532,7 +5561,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
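
The reworked DoSmiTag above relies on the x64 Smi layout asserted earlier in this file (kSmiTag == 0, kSmiTagSize + kSmiShiftSize == 32): the 32-bit payload sits in the upper half of the 64-bit word, so a uint32 input with bit 31 set is outside the Smi range and must deopt before tagging. A rough standalone sketch of that check (plain C++, helper names made up, not V8 API):

    #include <cstdint>

    // Tag a signed 32-bit payload by shifting it into the upper word,
    // matching kSmiTagSize + kSmiShiftSize == 32 on x64.
    inline int64_t TagSmi(int32_t payload) {
      return static_cast<int64_t>(payload) << 32;
    }

    // The testl(input, input) / DeoptimizeIf(sign, ...) pair in DoSmiTag:
    // a uint32 value only fits in a Smi when its sign bit is clear.
    inline bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0x80000000u) == 0;
    }
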
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 431f77b23..37807ede0 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -86,12 +86,12 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -130,9 +130,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -160,6 +158,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index c3bfd9e61..7c7fc29e0 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -198,7 +198,14 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Set(dst, static_cast<uint32_t>(cgen_->ToInteger32(constant_source)));
+ int32_t constant = cgen_->ToInteger32(constant_source);
+ // Do sign extension only for constant used as de-hoisted array key.
+ // Others only need zero extension, which saves 2 bytes.
+ if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+ __ Set(dst, constant);
+ } else {
+ __ Set(dst, static_cast<uint32_t>(constant));
+ }
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
@@ -218,8 +225,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
+ // Do sign extension to 64 bits when stored into stack slot.
__ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
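
The special case above for de-hoisted array keys comes down to 64-bit address arithmetic: once the constant part of the index has been folded into the array operand, the remaining key may be negative, and only a sign-extended 64-bit value yields the intended displacement. A small sketch of the difference (plain C++, assumptions only, not part of this patch):

    #include <cstdint>

    int32_t key = -1;  // a de-hoisted key may legitimately be negative
    uint64_t zero_extended = static_cast<uint32_t>(key);  // 0x00000000FFFFFFFF
    int64_t  sign_extended = static_cast<int64_t>(key);   // 0xFFFFFFFFFFFFFFFF (-1)
    // base + sign_extended * element_size addresses the intended element; the
    // zero-extended form would add a huge positive displacement instead of a
    // small negative one, which is why __ Set(dst, constant) keeps the sign
    // here while other int32 constants can take the shorter zero-extending
    // encoding.
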
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 1f2b1e98e..8c4f24e8f 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -175,6 +175,19 @@ bool LGoto::HasInterestingComment(LCodeGen* gen) const {
}
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+ LPlatformChunk* chunk) const {
+ HValue* hvalue = this->hydrogen_value();
+
+ if (hvalue == NULL) return false;
+ if (!hvalue->representation().IsInteger32()) return false;
+ if (hvalue->HasRange() && !hvalue->range()->CanBeNegative()) return false;
+
+ return chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -947,18 +960,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
if (goto_instr != NULL) return goto_instr;
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- ToBooleanStub::Types expected = instr->expected_input_types();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1117,6 +1132,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1142,8 +1158,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1155,6 +1175,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1246,24 +1273,72 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1272,74 +1347,114 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoDivI(instr);
}
}
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), rax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), rdx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().Equals(instr->representation()));
- ASSERT(right->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, rax),
- UseRegister(right),
- FixedTemp(rdx));
- LInstruction* result = DefineFixed(mod, rdx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1702,8 +1817,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* res = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!instr->value()->representation().IsSmi()) {
+ res = AssignEnvironment(res);
+ }
+ return res;
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1720,8 +1838,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* res =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!instr->value()->representation().IsSmi()) {
+ // Note: Only deopts in deferred code.
+ res = AssignEnvironment(res);
+ }
+ return res;
}
}
} else if (from.IsDouble()) {
@@ -1741,41 +1864,37 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value)));
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = NULL;
- if (val->CheckFlag(HInstruction::kUint32)) {
- result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange() &&
- val->range()->upper() != kMaxInt) {
- return result;
- }
- } else {
- result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
- }
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ ASSERT(val->CheckFlag(HValue::kUint32));
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
@@ -1826,6 +1945,7 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
}
LCheckMaps* result = new(zone()) LCheckMaps(value);
if (!instr->CanOmitMapChecks()) {
+ // Note: Only deopts in deferred code.
AssignEnvironment(result);
if (instr->has_migration_target()) return AssignPointerMap(result);
}
@@ -1852,6 +1972,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1911,7 +2045,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1928,7 +2065,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1969,32 +2109,51 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+ BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+ if (dehoisted_key_ids->Contains(candidate->id())) return;
+ dehoisted_key_ids->Add(candidate->id());
+ if (!candidate->IsPhi()) return;
+ for (int i = 0; i < candidate->OperandCount(); ++i) {
+ FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
+
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ !(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ (IsDoubleOrFloatElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(backing_store, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UINT32_ELEMENTS) ||
- (elements_kind == UINT32_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -2012,6 +2171,10 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2022,7 +2185,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
Representation value_representation = instr->value()->representation();
if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(value_representation.IsSmiOrTagged() ||
@@ -2133,7 +2296,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (needs_write_barrier) {
@@ -2142,10 +2305,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2156,12 +2318,13 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
+ LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
}
return result;
}
@@ -2193,7 +2356,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
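
The per-strategy builders above (DoDivByPowerOf2I, DoDivByConstI, DoDivI, and their flooring/mod counterparts) exist because truncating and flooring division by a power of two lower to different code. A minimal sketch of the arithmetic, assuming an arithmetic right shift as on x64 (not V8 code):

    #include <cstdint>

    // Flooring division by 2^k: a plain arithmetic shift.
    inline int32_t FlooringDivByPowerOf2(int32_t x, int k) {
      return x >> k;
    }

    // Truncating (JS-style) division by 2^k: bias negative dividends first,
    // so that e.g. -3 / 2 yields -1 rather than -2.
    inline int32_t TruncatingDivByPowerOf2(int32_t x, int k) {
      int32_t bias = (x >> 31) & ((1 << k) - 1);
      return (x + bias) >> k;
    }
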
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 36b274440..9d9ac1ea1 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -170,7 +177,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -265,6 +271,10 @@ class LInstruction : public ZoneObject {
virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+ virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+ return false;
+ }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -300,6 +310,9 @@ class LTemplateResultInstruction : public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
+ virtual bool MustSignExtendResult(
+ LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+
protected:
EmbeddedContainer<LOperand*, R> results_;
};
@@ -614,6 +627,49 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -631,6 +687,49 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -643,29 +742,55 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -762,6 +887,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -1829,19 +1966,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1856,19 +1980,6 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1881,15 +1992,17 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -1966,6 +2079,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2041,7 +2155,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2096,7 +2210,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2300,6 +2414,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2492,10 +2633,18 @@ class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
+ : LChunk(info, graph),
+ dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
+ BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+ bool IsDehoistedKey(HValue* value) {
+ return dehoisted_key_ids_.Contains(value->id());
+ }
+
+ private:
+ BitVector dehoisted_key_ids_;
};
@@ -2529,6 +2678,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2637,6 +2795,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
+ void FindDehoistedKeyDefinitions(HValue* candidate);
LPlatformChunk* chunk_;
CompilationInfo* info_;
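
For reference, the new LDoubleBits / LConstructDouble instructions declared above model the usual split of a double into its two 32-bit halves and the inverse reconstruction; in portable C++ the pair looks roughly like this (a sketch, not the V8 implementation):

    #include <cstdint>
    #include <cstring>

    inline uint32_t DoubleHighBits(double d) {     // the HDoubleBits::HIGH case
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    inline double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;                                    // mirrors the movd/psllq/orps
      std::memcpy(&d, &bits, sizeof(d));           // sequence in DoConstructDouble
      return d;
    }
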
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4c19fced6..6f313f7a6 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -128,7 +128,7 @@ void MacroAssembler::LoadAddress(Register destination,
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -145,7 +145,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -165,11 +165,11 @@ void MacroAssembler::PushAddress(ExternalReference source) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
@@ -200,13 +200,13 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -216,7 +216,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -236,13 +236,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
@@ -276,13 +276,13 @@ void MacroAssembler::InNewSpace(Register object,
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
@@ -291,11 +291,11 @@ void MacroAssembler::InNewSpace(Register object,
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
@@ -323,7 +323,7 @@ void MacroAssembler::RecordWriteField(
  // of the object, so offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteArray(Register object,
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
RecordWrite(
@@ -398,7 +398,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -483,7 +483,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -505,17 +505,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -527,21 +518,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(rax);
- Move(kScratchRegister, reinterpret_cast<Smi*>(p0),
- Assembler::RelocInfoNone());
- push(kScratchRegister);
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
+ Push(rax);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
Assembler::RelocInfoNone());
- push(kScratchRegister);
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
@@ -572,7 +560,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
+ addp(rsp, Immediate(num_arguments * kPointerSize));
}
LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -588,7 +576,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// the slow case, converting the key to a smi is always valid.
// key: string key
// hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
+ andp(hash, Immediate(String::kArrayIndexValueMask));
shr(hash, Immediate(String::kHashShift));
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
@@ -757,7 +745,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -812,7 +800,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -893,12 +881,12 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -916,12 +904,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
@@ -984,12 +972,17 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ ASSERT(kPointerSize == kInt32Size);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
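
The branch on kPointerSize == kInt64Size versus kInt32Size above is the pattern this patch threads through the whole macro assembler for x32 support: emit a plain 32-bit form when pointers are 4 bytes, and fall back to a scratch register only when a 64-bit immediate genuinely does not fit. A rough standalone sketch of that decision, assuming hypothetical Emit* helpers in place of the real assembler calls (plain C++, not V8 API):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the assembler; in V8 these would be movp et al.
static void EmitMov32(int32_t imm) { std::printf("mov dword, %d\n", imm); }
static void EmitMov64ViaScratch(int64_t imm) {
  std::printf("mov scratch, %lld; mov qword, scratch\n", (long long)imm);
}

static const int kInt32Size = 4;
static const int kInt64Size = 8;
static const int kPointerSize = sizeof(void*);  // 8 on x64, 4 on x32

// Mirrors the idea behind MacroAssembler::Set(const Operand&, intptr_t):
// with 64-bit pointers a value outside the sign-extended 32-bit range must
// go through a scratch register; with 32-bit pointers every intptr_t fits.
static void SetOperand(intptr_t x) {
  if (kPointerSize == kInt64Size) {
    if (x >= INT32_MIN && x <= INT32_MAX) {
      EmitMov32(static_cast<int32_t>(x));
    } else {
      EmitMov64ViaScratch(x);
    }
  } else {
    EmitMov32(static_cast<int32_t>(x));
  }
}

int main() {
  SetOperand(42);
  SetOperand(INTPTR_MAX);  // needs the scratch path only on 64-bit pointers
}
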
@@ -1009,7 +1002,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ xorq(dst, kScratchRegister);
} else {
Move(dst, src);
}
@@ -1021,7 +1014,7 @@ void MacroAssembler::SafePush(Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ xorq(Operand(rsp, 0), kScratchRegister);
} else {
Push(src);
}
@@ -1059,24 +1052,28 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
movp(dst, kSmiConstantRegister);
@@ -1089,7 +1086,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -1158,14 +1155,14 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
@@ -1178,10 +1175,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
@@ -1189,14 +1186,14 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
@@ -1210,7 +1207,7 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
@@ -1258,12 +1255,12 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
movp(dst, kScratchRegister);
} else {
movp(dst, src1);
- or_(dst, src2);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1310,7 +1307,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
return CheckNonNegativeSmi(first);
}
movp(kScratchRegister, first);
- or_(kScratchRegister, second);
+ orp(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
@@ -1339,7 +1336,7 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
@@ -1456,39 +1453,39 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1515,16 +1512,16 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1538,7 +1535,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1552,17 +1549,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1581,16 +1578,16 @@ void MacroAssembler::SmiSubConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1607,12 +1604,12 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1626,15 +1623,15 @@ void MacroAssembler::SmiNeg(Register dst,
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
- neg(dst);
- cmpq(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
@@ -1650,15 +1647,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1694,12 +1691,12 @@ void MacroAssembler::SmiAdd(Register dst,
if (!dst.is(src1)) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1714,15 +1711,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1760,7 +1757,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
if (!dst.is(src1)) {
masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1792,17 +1789,17 @@ void MacroAssembler::SmiMul(Register dst,
Label failure, zero_correct_result;
movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
- xor_(dst, src2);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
@@ -1816,17 +1813,17 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero, so check whether the other is
// negative.
movp(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1846,7 +1843,7 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1863,7 +1860,7 @@ void MacroAssembler::SmiDiv(Register dst,
Label safe_div;
testl(rax, Immediate(0x7fffffff));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
@@ -1909,7 +1906,7 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1945,7 +1942,7 @@ void MacroAssembler::SmiMod(Register dst,
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1958,11 +1955,11 @@ void MacroAssembler::SmiNot(Register dst, Register src) {
// Set tag and padding bits before negating, so that they are zero afterwards.
movl(kScratchRegister, Immediate(~0));
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
@@ -1971,7 +1968,7 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1981,10 +1978,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1994,7 +1991,7 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -2002,10 +1999,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2015,7 +2012,7 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2023,10 +2020,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2067,7 +2064,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
} else {
movp(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
shr(dst, Immediate(shift_value + kSmiShift));
@@ -2086,7 +2083,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
SmiToInteger32(rcx, src2);
// Shift amount is specified by the lower 5 bits, not six as for the shl opcode.
- and_(rcx, Immediate(0x1f));
+ andq(rcx, Immediate(0x1f));
shl_cl(dst);
}
@@ -2175,7 +2172,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
@@ -2183,13 +2180,13 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
movp(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
@@ -2219,7 +2216,7 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
if (!dst.is(src)) {
movq(dst, src);
}
- neg(dst);
+ negq(dst);
if (shift < kSmiShift) {
sar(dst, Immediate(kSmiShift - shift));
} else {
@@ -2238,10 +2235,10 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
@@ -2251,22 +2248,22 @@ void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
// High bits.
shr(src, Immediate(64 - kSmiShift));
shl(src, Immediate(kSmiShift));
- push(src);
+ Push(src);
// Low bits.
shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ Push(scratch);
}
void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+ Pop(scratch);
// Low bits.
shr(scratch, Immediate(kSmiShift));
- pop(dst);
+ Pop(dst);
shr(dst, Immediate(kSmiShift));
// High bits.
shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ orp(dst, scratch);
}
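
PushInt64AsTwoSmis/PopInt64AsTwoSmis above smuggle a raw 64-bit word through GC-visited stack slots by splitting it into two values that look like smis: with kSmiShift == 32 (as on x64), a smi keeps its payload in the upper 32 bits and zeros in the tag bits. A rough standalone model of the split/join arithmetic, using plain integers rather than the V8 types:

#include <cstdint>
#include <cassert>

static const int kSmiShift = 32;  // x64 smi layout: payload in bits 63..32

struct TwoSmis { uint64_t high_smi; uint64_t low_smi; };

// Each 32-bit half of the value is parked in the upper half of a word,
// which is exactly the smi encoding, so the GC treats both as small ints.
static TwoSmis SplitInt64(uint64_t src) {
  TwoSmis out;
  out.high_smi = (src >> (64 - kSmiShift)) << kSmiShift;  // high 32 bits
  out.low_smi  = src << kSmiShift;                        // low 32 bits
  return out;
}

static uint64_t JoinInt64(const TwoSmis& in) {
  uint64_t low  = in.low_smi >> kSmiShift;
  uint64_t high = (in.high_smi >> kSmiShift) << (64 - kSmiShift);
  return high | low;
}

int main() {
  uint64_t v = 0x0123456789abcdefULL;
  assert(JoinInt64(SplitInt64(v)) == v);  // round trip is lossless
}
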
@@ -2296,7 +2293,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2312,8 +2309,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields,
// but the times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by the addressing mode on the x64 platform.
@@ -2336,7 +2333,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields,
// but the times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by the addressing mode on the x64 platform.
@@ -2344,7 +2341,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2401,7 +2398,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2448,7 +2445,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2520,7 +2517,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2531,7 +2528,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2542,7 +2539,7 @@ void MacroAssembler::Push(Handle<Object> source) {
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
@@ -2574,7 +2571,87 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit push for rbp in the prologue.
+ ASSERT(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
+ }
+}
+
+
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ ASSERT(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
}
}
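
The new Push/Pop bodies above are the x32 story in miniature: the native pushq/popq always move 8 bytes, so with 4-byte pointers the macro assembler adjusts rsp by hand (leal, unlike addl/subl, leaves the flags intact) and then does a plain pointer-sized store or load. A hedged, self-contained model of that sequence on a toy stack; the names here are illustrative, not V8 API:

#include <cstdint>
#include <cstring>
#include <cassert>

// Toy model of the x32 Push/Pop sequence: move the stack pointer by the
// pointer size "by hand", then store/load a pointer-sized value.
struct ToyStack {
  uint8_t memory[64];
  uint8_t* rsp = memory + sizeof(memory);

  void Push32(uint32_t value) {   // leal rsp, [rsp - 4]; movl [rsp], value
    rsp -= 4;
    std::memcpy(rsp, &value, 4);
  }
  uint32_t Pop32() {              // movl value, [rsp]; leal rsp, [rsp + 4]
    uint32_t value;
    std::memcpy(&value, rsp, 4);
    rsp += 4;
    return value;
  }
};

int main() {
  ToyStack s;
  s.Push32(0xdeadbeef);
  s.Push32(42);
  assert(s.Pop32() == 42);
  assert(s.Pop32() == 0xdeadbeef);  // last in, first out
}
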
@@ -2592,6 +2669,17 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
@@ -2623,6 +2711,17 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
@@ -2651,26 +2750,26 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2678,23 +2777,23 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2759,23 +2858,23 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp);
}
@@ -2784,8 +2883,8 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2798,7 +2897,7 @@ void MacroAssembler::JumpToHandlerEntry() {
movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
@@ -2821,21 +2920,21 @@ void MacroAssembler::Throw(Register value) {
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
@@ -2875,15 +2974,15 @@ void MacroAssembler::ThrowUncatchable(Register value) {
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2899,7 +2998,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -3059,10 +3158,10 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
@@ -3099,16 +3198,15 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
@@ -3121,14 +3219,13 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
}
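
The rewritten truncations above lean on an x86 detail: cvttsd2si/cvttsd2siq return the "integer indefinite" value (0x80000000 / 0x8000000000000000) when the input cannot be represented, and subtracting 1 overflows for exactly that value, so cmp reg, 1 followed by j(no_overflow) detects a successful conversion without loading a 64-bit sentinel into a scratch register. A small standalone check of the arithmetic fact this relies on (plain C++, not V8 code; uses a GCC/Clang builtin):

#include <cstdint>
#include <cstdio>

// Signed overflow of (x - 1) happens iff x == INT64_MIN, i.e. iff x is the
// 0x8000000000000000 sentinel cvttsd2siq produces for unrepresentable input.
// This mirrors what `cmpq(result, Immediate(1)); j(no_overflow, &done)` tests.
static bool SubOneOverflows(int64_t x) {
  int64_t unused;
  return __builtin_sub_overflow(x, int64_t{1}, &unused);
}

int main() {
  std::printf("INT64_MIN: overflow=%d\n", SubOneOverflows(INT64_MIN));  // 1
  std::printf("0:         overflow=%d\n", SubOneOverflows(0));          // 0
  std::printf("INT64_MAX: overflow=%d\n", SubOneOverflows(INT64_MAX));  // 0
}
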
@@ -3204,15 +3301,15 @@ void MacroAssembler::Throw(BailoutReason reason) {
}
#endif
- push(rax);
+ Push(rax);
Push(Smi::FromInt(reason));
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// Control will not return here.
int3();
@@ -3244,7 +3341,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ andp(dst, kScratchRegister);
}
@@ -3315,10 +3412,10 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
@@ -3328,22 +3425,35 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
@@ -3591,14 +3701,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
@@ -3609,7 +3719,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movp(rdx, code_register);
}
@@ -3631,9 +3741,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
} else {
PredictableCodeSizeScope predictible_code_size_scope(this,
@@ -3644,27 +3754,27 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
- push(rsi); // Context.
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Push(kScratchRegister);
if (emit_debug_code()) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3673,11 +3783,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
}
@@ -3688,14 +3798,14 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
kFPOnStackSize + kPCOnStackSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
+ Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from ExitFrame::code_slot.
+ Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
@@ -3717,14 +3827,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
- subq(rsp, Immediate(space));
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3732,7 +3842,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -3746,7 +3856,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3774,7 +3884,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
@@ -3784,7 +3894,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
@@ -3821,7 +3931,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
@@ -3838,7 +3948,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3849,7 +3959,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
+ Push(holder_reg);
movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
@@ -3859,7 +3969,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
movp(kScratchRegister,
@@ -3867,7 +3977,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -3958,14 +4068,14 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
@@ -4005,7 +4115,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
@@ -4026,7 +4136,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
@@ -4086,10 +4196,10 @@ void MacroAssembler::Allocate(int object_size,
if (!top_reg.is(result)) {
movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4098,14 +4208,14 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
@@ -4119,7 +4229,7 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4162,10 +4272,10 @@ void MacroAssembler::Allocate(Register object_size,
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4173,7 +4283,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4183,10 +4293,10 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
@@ -4217,11 +4327,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4256,10 +4366,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
@@ -4405,12 +4515,12 @@ void MacroAssembler::CopyBytes(Register destination,
// at the end of the ranges.
movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
@@ -4426,7 +4536,7 @@ void MacroAssembler::CopyBytes(Register destination,
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4438,8 +4548,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
@@ -4455,9 +4565,9 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
jmp(&entry);
bind(&loop);
movp(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
@@ -4505,7 +4615,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
@@ -4515,30 +4625,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movp(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4556,15 +4642,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movp(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
@@ -4608,13 +4685,13 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
Abort(kNonObject);
bind(&is_object);
- push(value);
+ Push(value);
movp(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
@@ -4642,8 +4719,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- and_(rsp, Immediate(-frame_alignment));
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
@@ -4712,10 +4789,10 @@ void MacroAssembler::CheckPageFlag(
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4734,7 +4811,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Move(scratch, map);
movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ andp(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4754,10 +4831,10 @@ void MacroAssembler::JumpIfBlack(Register object,
movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
@@ -4791,19 +4868,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
+ addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
shl_cl(mask_reg);
}
@@ -4828,20 +4905,20 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
// shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4884,21 +4961,21 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
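
The encoding arithmetic above turns the instance-type bit into a character size: with kOneByteStringTag == 0x04, flipping the encoding bit and adding 4 yields 4 for ASCII and 8 for two-byte strings, i.e. the character size times four, so a single right shift removes both that factor and the Smi tag of the length field. A hedged sketch with the Smi shift left out for brevity:

  // instance_type: the byte at Map::kInstanceTypeOffset; length: untagged
  // character count. header_size and alignment_mask stand in for
  // SeqString::kHeaderSize and kObjectAlignmentMask.
  static int SeqStringSizeInBytes(int instance_type, int length,
                                  int header_size, int alignment_mask) {
    int encoding = instance_type & 0x04;                  // kStringEncodingMask
    int char_size_times_4 = (encoding ^ 0x04) + 0x04;     // ASCII -> 4, UC16 -> 8
    int byte_length = (char_size_times_4 * length) >> 2;  // undo the factor of 4
    return (byte_length + header_size + alignment_mask) & ~alignment_mask;
  }
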
@@ -4935,18 +5012,18 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- cmpq(empty_fixed_array_value,
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance, the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
- cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
@@ -4959,12 +5036,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
@@ -4987,9 +5064,9 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
+ andp(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
@@ -4997,6 +5074,21 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(rax));
+ ASSERT(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
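
The new TruncatingDiv above replaces a runtime idiv by the usual multiply-and-shift scheme: MultiplierAndShift (not shown in this patch excerpt) picks a magic multiplier and shift for the constant divisor, the high half of the 32x32 signed product lands in rdx, and adding the dividend's sign bit rounds the quotient toward zero. A hedged C++ sketch of the same sequence; multiplier and shift stand for ms.multiplier() and ms.shift(), whose derivation is assumed:

  #include <stdint.h>

  static int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                                     int32_t multiplier, int shift) {
    // High 32 bits of the signed 64-bit product -- what imull leaves in rdx.
    int32_t hi = (int32_t)(((int64_t)multiplier * dividend) >> 32);
    if (divisor > 0 && multiplier < 0) hi += dividend;  // addl(rdx, dividend)
    if (divisor < 0 && multiplier > 0) hi -= dividend;  // subl(rdx, dividend)
    if (shift > 0) hi >>= shift;                        // sarl(rdx, shift)
    hi += (int32_t)((uint32_t)dividend >> 31);          // add sign bit: round toward zero
    return hi;                                          // the quotient, as left in rdx
  }
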
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 42245aa80..af65a6546 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -336,7 +336,7 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
+ addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
@@ -802,7 +802,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -837,12 +837,16 @@ class MacroAssembler: public Assembler {
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
+ void Push(Register src);
+ void Push(const Operand& src);
+ void Push(Immediate value);
+ void PushImm32(int32_t imm32);
+ void Pop(Register dst);
+ void Pop(const Operand& dst);
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
- movp(dst, reinterpret_cast<Address>(ext.address()),
+ movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
@@ -859,16 +863,18 @@ class MacroAssembler: public Assembler {
ASSERT(!RelocInfo::IsNone(rmode));
ASSERT(value->IsHeapObject());
ASSERT(!isolate()->heap()->InNewSpace(*value));
- movp(dst, value.location(), rmode);
+ movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
@@ -1021,7 +1027,7 @@ class MacroAssembler: public Assembler {
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
+ andp(reg, Immediate(mask));
shl(reg, Immediate(kSmiShift));
}
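
The shift/mask/shift sequence in this hunk extracts a bit field from a Smi-tagged word and re-tags the result as a Smi. A hedged sketch of the equivalent integer arithmetic; the helper name and template shape here are illustrative, not V8's actual declaration:

  // kShift/kMask play the role of Field::kShift and Field::kMask; kSmiShift is
  // the platform's Smi tag shift. Input and output are both Smi-tagged values.
  template <int kShift, int kMask, int kSmiShift>
  static intptr_t decode_field_to_smi(intptr_t smi_tagged) {
    intptr_t field = (smi_tagged >> (kShift + kSmiShift)) & (kMask >> kShift);
    return field << kSmiShift;
  }
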
@@ -1045,6 +1051,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1232,15 +1242,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1368,6 +1371,10 @@ class MacroAssembler: public Assembler {
Register filler);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in rdx, and rax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1605,9 +1612,9 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
masm->pushfq(); \
masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
+ masm->Pop(rax); \
masm->Popad(); \
masm->popfq(); \
} \
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 75e70c597..c819c71cb 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -166,7 +166,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
+ __ addp(register_location(reg), Immediate(by));
}
}
@@ -175,7 +175,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
- __ addq(rbx, code_object_pointer());
+ __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -203,8 +203,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
@@ -215,8 +215,8 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -243,7 +243,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ movq(rdx, register_location(start_reg)); // Offset of start of capture
__ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
+ __ subp(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -273,9 +273,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
+ __ leap(r9, Operand(rsi, rdx, times_1, 0));
+ __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -293,8 +293,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
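
The comment above relies on the ASCII layout: setting bit 0x20 folds 'A'..'Z' onto 'a'..'z', so after OR-ing both characters the pair matches case-insensitively exactly when the folded values are equal and fall in the letter range. A hedged sketch of that test for a single byte pair:

  static bool AsciiEqualIgnoreCase(uint8_t a, uint8_t b) {
    a |= 0x20;  // fold upper case onto lower case
    b |= 0x20;
    // Equal after folding, and actually a letter (guards against e.g. '@'/'`').
    return a == b && (uint8_t)(a - 'a') <= 'z' - 'a';
  }
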
@@ -308,10 +308,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
+ __ addp(r11, Immediate(1));
+ __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
@@ -322,10 +322,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
- __ push(backtrack_stackpointer());
+ __ pushq(backtrack_stackpointer());
static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
@@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
__ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
// Set byte_length.
@@ -367,14 +367,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
+ __ popq(backtrack_stackpointer());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -392,7 +392,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Find length of back-referenced capture.
__ movq(rdx, register_location(start_reg));
__ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
+ __ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addp(rdx, rsi); // Start of capture.
+ __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -433,10 +433,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
+ __ addp(rbx, Immediate(char_size()));
+ __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
@@ -462,7 +462,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +476,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -489,8 +489,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ leap(rax, Operand(current_character(), -minus));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -523,7 +523,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
switch (type) {
case 's':
@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
+ __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
@@ -562,20 +562,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +593,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
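
The character-class tests above lean on the unsigned range-check idiom noted at the top of CheckSpecialCharacterClass: in unsigned arithmetic, (c - min) <= (max - min) covers min..max with one compare. The newline cases additionally XOR with 0x01 so that '\n' (0x0a) and '\r' (0x0d) land on the adjacent values 0x0b and 0x0c. A hedged sketch of the ASCII part only (the 0x2028/0x2029 terminators handled elsewhere in the emitted code are omitted):

  static bool InRangeUnsigned(uint32_t c, uint32_t min, uint32_t max) {
    return c - min <= max - min;  // e.g. digits: c - '0' <= 9
  }

  static bool IsAsciiLineTerminator(uint32_t c) {
    return InRangeUnsigned(c ^ 0x01, 0x0b, 0x0c);  // true only for '\n' and '\r'
  }
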
@@ -674,7 +674,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
@@ -686,9 +686,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
+ __ pushq(rsi);
+ __ pushq(rdi);
+ __ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
@@ -698,18 +698,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
+ __ pushq(rdi);
+ __ pushq(rsi);
+ __ pushq(rdx);
+ __ pushq(rcx);
+ __ pushq(r8);
+ __ pushq(r9);
+
+ __ pushq(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ Push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -719,12 +719,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
+ __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
@@ -734,28 +734,28 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
__ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
+ __ subp(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ negq(rbx);
if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
@@ -824,11 +824,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rdx, Operand(rbp, kStartIndex));
__ movp(rbx, Operand(rbp, kRegisterOutput));
__ movp(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addq(rcx, rdx);
+ __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
@@ -836,7 +836,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
- __ addq(rax, rcx); // Convert to index from start, not end.
+ __ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
}
@@ -847,18 +847,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
+ __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
@@ -867,11 +867,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
@@ -896,10 +896,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
+ __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ popq(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
@@ -908,7 +908,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -923,19 +923,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
- __ push(rdi);
+ __ pushq(backtrack_stackpointer());
+ __ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
+ __ popq(rdi);
+ __ popq(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
__ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
@@ -950,8 +950,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
// Call GrowStack(backtrack_stackpointer())
@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movp(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -973,15 +973,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
SafeReturn();
}
@@ -1015,7 +1015,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1023,14 +1023,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1091,13 +1091,13 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
__ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) {
__ movp(register_location(reg), rdi);
} else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
__ movp(register_location(reg), rax);
}
}
@@ -1142,7 +1142,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
__ movp(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
+ __ subp(rax, Operand(rbp, kStackHighEnd));
__ movp(register_location(reg), rax);
}
@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
__ movp(rdx, rbp);
@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
+ __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1323,12 +1323,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
+ __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
+ __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1336,14 +1336,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1367,7 +1367,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1377,12 +1377,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
@@ -1392,7 +1392,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1406,7 +1406,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a43d709b1..13e822da2 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -49,10 +49,12 @@ static void ProbeTable(Isolate* isolate,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
+ // We need to scale up the pointer by 2 when the offset is scaled by less
// than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
+ ASSERT(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == kHeapObjectTagSize + 1
+ : kPointerSizeLog2 == kHeapObjectTagSize);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
@@ -62,7 +64,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
+ __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
@@ -77,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
@@ -87,7 +89,7 @@ static void ProbeTable(Isolate* isolate,
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -100,7 +102,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
@@ -193,10 +195,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -204,11 +206,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
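
The probe sequence above hashes on the name's hash field plus the low 32 bits of the receiver's map, then derives a second, differently-mixed index when the primary entry misses. A hedged sketch of the two index computations; the table masks stand for (kPrimaryTableSize - 1) << kHeapObjectTagSize and its secondary counterpart:

  #include <stdint.h>

  static uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_low32,
                                uint32_t flags, uint32_t primary_mask) {
    return ((name_hash + map_low32) ^ flags) & primary_mask;
  }

  // name_low32 is the low half of the Name pointer itself, matching
  // the subl(scratch, name) in the code above.
  static uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32,
                                  uint32_t flags, uint32_t secondary_mask) {
    return (primary_offset - name_low32 + flags) & secondary_mask;
  }
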
@@ -281,54 +283,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ movp(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movp(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movp(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register result,
@@ -346,7 +300,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -368,13 +322,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
- __ push(name);
+ __ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
__ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
}
@@ -393,24 +347,25 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
__ PopReturnAddressTo(scratch_in);
// receiver
- __ push(receiver);
+ __ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
ASSERT(!scratch_in.is(arg));
- __ push(arg);
+ __ Push(arg);
}
__ PushReturnAddressFrom(scratch_in);
// Stack now matches JSFunction abi.
@@ -465,7 +420,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -536,11 +491,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -568,9 +523,9 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ PopReturnAddressTo(scratch1);
- __ push(receiver_reg);
+ __ Push(receiver_reg);
__ Push(transition);
- __ push(value_reg);
+ __ Push(value_reg);
__ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -613,15 +568,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ movp(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -633,15 +588,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
__ movp(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -680,11 +635,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -723,7 +678,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movp(FieldOperand(receiver_reg, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -738,7 +693,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movp(FieldOperand(scratch1, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -773,9 +728,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -941,7 +893,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
@@ -970,15 +922,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -992,22 +935,22 @@ void LoadStubCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ push(receiver()); // receiver
+ __ Push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
__ Move(scratch2(), callback);
- __ push(FieldOperand(scratch2(),
+ __ Push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ push(kScratchRegister); // return value
- __ push(kScratchRegister); // return value default
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ push(reg); // holder
- __ push(name()); // name
+ __ Push(reg); // holder
+ __ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
@@ -1075,10 +1018,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver());
+ __ Push(receiver());
}
- __ push(holder_reg);
- __ push(this->name());
+ __ Push(holder_reg);
+ __ Push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
@@ -1096,10 +1039,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver());
+ __ Pop(receiver());
}
// Leave the internal frame.
@@ -1141,11 +1084,11 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(holder_reg);
+ __ Push(receiver());
+ __ Push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
- __ push(value());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1158,24 +1101,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1183,20 +1108,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = rdx;
- Register value = rax;
// Save value register, so we can restore it later.
- __ push(value);
+ __ Push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1205,8 +1126,8 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
- __ push(value);
+ __ Push(receiver);
+ __ Push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1218,7 +1139,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(rax);
+ __ Pop(rax);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1235,9 +1156,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(this->name());
- __ push(value());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1250,6 +1171,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -1314,16 +1249,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return rax;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
@@ -1351,7 +1291,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
+ __ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
diff --git a/deps/v8/src/zone-allocator.h b/deps/v8/src/zone-allocator.h
index 5245c6b1b..7ed171390 100644
--- a/deps/v8/src/zone-allocator.h
+++ b/deps/v8/src/zone-allocator.h
@@ -50,7 +50,9 @@ class zone_allocator {
explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
explicit zone_allocator(const zone_allocator& other) throw()
: zone_(other.zone_) {}
- template<typename U> zone_allocator(const zone_allocator<U>&) throw() {}
+ template<typename U> zone_allocator(const zone_allocator<U>& other) throw()
+ : zone_(other.zone_) {}
+ template<typename U> friend class zone_allocator;
pointer address(reference x) const {return &x;}
const_pointer address(const_reference x) const {return &x;}
@@ -69,9 +71,17 @@ class zone_allocator {
void construct(pointer p, const T& val) {
new(static_cast<void*>(p)) T(val);
}
- void destroy(pointer p) { (static_cast<T*>(p))->~T(); }
+ void destroy(pointer p) { p->~T(); }
+
+ bool operator==(zone_allocator const& other) {
+ return zone_ == other.zone_;
+ }
+ bool operator!=(zone_allocator const& other) {
+ return zone_ != other.zone_;
+ }
private:
+ zone_allocator();
Zone* zone_;
};
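
The converting constructor added above is what lets containers that rebind the allocator internally (node-based ones in particular) keep the Zone* instead of dropping it, and the new equality operators make allocators from the same zone compare equal, as the standard library expects. A hedged usage sketch; the container typedef and helper are illustrative, not part of this patch:

  #include <vector>

  void ZoneVectorExample(Zone* zone) {
    zone_allocator<int> alloc(zone);
    std::vector<int, zone_allocator<int> > ints(alloc);
    ints.push_back(1);  // element storage comes from 'zone' and dies with it
  }
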
diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h
index f257382a2..9b82c0540 100644
--- a/deps/v8/src/zone-inl.h
+++ b/deps/v8/src/zone-inl.h
@@ -30,6 +30,12 @@
#include "zone.h"
+#ifdef V8_USE_ADDRESS_SANITIZER
+ #include <sanitizer/asan_interface.h>
+#else
+ #define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
+#endif
+
#include "counters.h"
#include "isolate.h"
#include "utils.h"
@@ -39,6 +45,9 @@ namespace v8 {
namespace internal {
+static const int kASanRedzoneBytes = 24; // Must be a multiple of 8.
+
+
inline void* Zone::New(int size) {
// Round up the requested size to fit the alignment.
size = RoundUp(size, kAlignment);
@@ -54,12 +63,25 @@ inline void* Zone::New(int size) {
// Check if the requested size is available without expanding.
Address result = position_;
- if (size > limit_ - position_) {
- result = NewExpand(size);
+ int size_with_redzone =
+#ifdef V8_USE_ADDRESS_SANITIZER
+ size + kASanRedzoneBytes;
+#else
+ size;
+#endif
+
+ if (size_with_redzone > limit_ - position_) {
+ result = NewExpand(size_with_redzone);
} else {
- position_ += size;
+ position_ += size_with_redzone;
}
+#ifdef V8_USE_ADDRESS_SANITIZER
+ Address redzone_position = result + size;
+ ASSERT(redzone_position + kASanRedzoneBytes == position_);
+ ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
+#endif
+
// Check that the result has the proper alignment and return it.
ASSERT(IsAddressAligned(result, kAlignment, 0));
allocation_size_ += size;
@@ -69,6 +91,7 @@ inline void* Zone::New(int size) {
template <typename T>
T* Zone::NewArray(int length) {
+ CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > length);
return static_cast<T*>(New(length * sizeof(T)));
}
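The redzone bookkeeping in Zone::New is easiest to see with concrete numbers. A standalone sketch of the size arithmetic — the constants are copied from the hunk above, while the helper function and figures are illustrative, not V8 code:

```cpp
// Standalone sketch of the ASan-path size arithmetic in Zone::New above.
#include <cassert>

static const int kAlignment = 8;          // Zone alignment under ASan
static const int kASanRedzoneBytes = 24;  // must stay a multiple of 8

// Bytes actually consumed from the zone by one New(requested) call.
int BytesConsumed(int requested) {
  int size = ((requested + kAlignment - 1) / kAlignment) * kAlignment;  // RoundUp
  return size + kASanRedzoneBytes;  // usable bytes plus the poisoned tail
}

int main() {
  assert(BytesConsumed(20) == 48);  // 24 usable, 24 poisoned
  assert(BytesConsumed(8)  == 32);  //  8 usable, 24 poisoned
  // Touching the poisoned tail (result + size .. result + size + 23) triggers
  // an AddressSanitizer report, catching off-the-end accesses into zone memory
  // that previously went unnoticed.
  return 0;
}
```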
diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc
index 417f895e5..4f9137129 100644
--- a/deps/v8/src/zone.cc
+++ b/deps/v8/src/zone.cc
@@ -104,6 +104,8 @@ void Zone::DeleteAll() {
} else {
int size = current->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(current, size);
// Zap the entire current segment (including the header).
memset(current, kZapDeadByte, size);
#endif
@@ -120,6 +122,8 @@ void Zone::DeleteAll() {
Address start = keep->start();
position_ = RoundUp(start, kAlignment);
limit_ = keep->end();
+ // Un-poison so we can re-use the segment later.
+ ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
// Zap the contents of the kept segment (but not the header).
memset(start, kZapDeadByte, keep->capacity());
@@ -143,6 +147,8 @@ void Zone::DeleteKeptSegment() {
if (segment_head_ != NULL) {
int size = segment_head_->size();
#ifdef DEBUG
+ // Un-poison first so the zapping doesn't trigger ASan complaints.
+ ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
// Zap the entire kept segment (including the header).
memset(segment_head_, kZapDeadByte, size);
#endif
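The zone.cc hunks all follow one rule: any region that may still be poisoned has to be un-poisoned before it is written (the debug zap) or reused. A minimal sketch of that pattern, mirroring the guard used in zone-inl.h — the helper function and zap byte below are illustrative, not V8 code:

```cpp
// Sketch of the un-poison-before-write pattern used in Zone::DeleteAll above.
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#else
#define ASAN_UNPOISON_MEMORY_REGION(start, size) ((void) 0)
#endif
#include <string.h>

void ZapSegment(void* start, int size) {
  // The segment may still carry ASan redzones left behind by Zone::New, so
  // make the whole region addressable again before memset touches it.
  ASAN_UNPOISON_MEMORY_REGION(start, size);
  memset(start, 0xcd, size);  // illustrative zap byte, not necessarily V8's
}
```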
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index bd7cc39b0..83421b396 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -38,6 +38,11 @@
namespace v8 {
namespace internal {
+#if defined(__has_feature)
+ #if __has_feature(address_sanitizer)
+ #define V8_USE_ADDRESS_SANITIZER
+ #endif
+#endif
class Segment;
class Isolate;
@@ -89,8 +94,13 @@ class Zone {
// All pointers returned from New() have this alignment. In addition, if the
// object being allocated has a size that is divisible by 8 then its alignment
- // will be 8.
+ // will be 8. ASan requires 8-byte alignment.
+#ifdef V8_USE_ADDRESS_SANITIZER
+ static const int kAlignment = 8;
+ STATIC_ASSERT(kPointerSize <= 8);
+#else
static const int kAlignment = kPointerSize;
+#endif
// Never allocate segments smaller than this size in bytes.
static const int kMinimumSegmentSize = 8 * KB;
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index 103eaeb12..d651b3c0f 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -25,9 +25,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Too slow in Debug mode.
[
-['mode == debug', {
- # Too slow in Debug mode.
- 'octane/mandreel': [SKIP],
-}], # 'mode == debug'
+[ALWAYS, {
+ 'octane/mandreel': [PASS, ['mode == debug', SKIP]],
+}], # ALWAYS
]
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 996db3eea..ec5b08dd2 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -53,6 +53,7 @@
'test-alloc.cc',
'test-api.cc',
'test-ast.cc',
+ 'test-atomicops.cc',
'test-bignum.cc',
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
@@ -88,6 +89,7 @@
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
+ 'test-microtask-delivery.cc',
'test-mark-compact.cc',
'test-mementos.cc',
'test-mutex.cc',
@@ -138,6 +140,7 @@
'test-code-stubs.cc',
'test-code-stubs-x64.cc',
'test-cpu-x64.cc',
+ 'test-disasm-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'
],
@@ -151,6 +154,18 @@
'test-macro-assembler-arm.cc'
],
}],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [
+ 'test-utils-arm64.cc',
+ 'test-assembler-arm64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-arm64.cc',
+ 'test-disasm-arm64.cc',
+ 'test-fuzz-arm64.cc',
+ 'test-javascript-arm64.cc',
+ 'test-js-arm64-variables.cc'
+ ],
+ }],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index d9f76294e..635983523 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -308,27 +308,89 @@ static inline v8::Local<v8::Script> v8_compile(const char* x) {
}
-// Helper function that compiles and runs the source.
+static inline v8::Local<v8::Script> v8_compile(v8::Local<v8::String> x) {
+ return v8::Script::Compile(x);
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(
+ v8::Local<v8::String> source, v8::Local<v8::String> origin_url) {
+ v8::ScriptOrigin origin(origin_url);
+ v8::ScriptCompiler::Source script_source(source, origin);
+ return v8::ScriptCompiler::Compile(
+ v8::Isolate::GetCurrent(), &script_source);
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(
+ v8::Local<v8::String> source, const char* origin_url) {
+ return CompileWithOrigin(source, v8_str(origin_url));
+}
+
+
+static inline v8::Local<v8::Script> CompileWithOrigin(const char* source,
+ const char* origin_url) {
+ return CompileWithOrigin(v8_str(source), v8_str(origin_url));
+}
+
+
+// Helper functions that compile and run the source.
static inline v8::Local<v8::Value> CompileRun(const char* source) {
- return v8::Script::Compile(
- v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), source))->Run();
+ return v8::Script::Compile(v8_str(source))->Run();
}
-// Helper function that compiles and runs the source with given origin.
+static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
+ return v8::Script::Compile(source)->Run();
+}
+
+
+static inline v8::Local<v8::Value> PreCompileCompileRun(const char* source) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::String> source_string =
+ v8::String::NewFromUtf8(isolate, source);
+ v8::ScriptData* preparse = v8::ScriptData::PreCompile(source_string);
+ v8::ScriptCompiler::Source script_source(
+ source_string, new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(isolate, &script_source);
+ v8::Local<v8::Value> result = script->Run();
+ delete preparse;
+ return result;
+}
+
+
+// Helper functions that compile and run the source with given origin.
static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
const char* origin_url,
int line_number,
int column_number) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, origin_url),
+ v8::ScriptOrigin origin(v8_str(origin_url),
v8::Integer::New(isolate, line_number),
v8::Integer::New(isolate, column_number));
- return v8::Script::Compile(v8::String::NewFromUtf8(isolate, source), &origin)
+ v8::ScriptCompiler::Source script_source(v8_str(source), origin);
+ return v8::ScriptCompiler::Compile(isolate, &script_source)->Run();
+}
+
+
+static inline v8::Local<v8::Value> CompileRunWithOrigin(
+ v8::Local<v8::String> source, const char* origin_url) {
+ v8::ScriptCompiler::Source script_source(
+ source, v8::ScriptOrigin(v8_str(origin_url)));
+ return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &script_source)
->Run();
}
+static inline v8::Local<v8::Value> CompileRunWithOrigin(
+ const char* source, const char* origin_url) {
+ return CompileRunWithOrigin(v8_str(source), origin_url);
+}
+
+
// Pick a slightly different port to allow tests to be run in parallel.
static inline int FlagDependentPortOffset() {
return ::v8::internal::FLAG_crankshaft == false ? 100 :
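The cctest.h hunk grows the family of compile-and-run helpers so tests stop spelling out Script::Compile(v8_str(...))->Run() by hand, which is exactly the mechanical substitution in the test files below. A hypothetical usage sketch in cctest style, assuming the helpers above; the test name and script text are mine:

```cpp
// Hypothetical cctest-style usage of the helpers added above.
TEST(CompileRunHelpersSketch) {
  LocalContext env;
  v8::HandleScope scope(env->GetIsolate());

  // One-liner compile + run.
  CHECK_EQ(3, CompileRun("1 + 2")->Int32Value());

  // Same, but with a resource name so error messages point at "sketch.js".
  v8::Local<v8::Value> result =
      CompileRunWithOrigin("(function f() { return 7; })()", "sketch.js");
  CHECK_EQ(7, result->Int32Value());

  // Compile once, keep the Script around for repeated runs.
  v8::Local<v8::Script> script = CompileWithOrigin("40 + 2", "sketch.js");
  CHECK_EQ(42, script->Run()->Int32Value());
}
```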
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 721f1eb4f..2f09743e2 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -46,6 +46,10 @@
# This test always fails. It tests that LiveEdit causes abort when turned off.
'test-debug/LiveEditDisabled': [FAIL],
+ # This test always fails. It tests that DisallowJavascriptExecutionScope
+ # works as intended.
+ 'test-api/DisallowJavascriptExecutionScope': [FAIL],
+
# TODO(gc): Temporarily disabled in the GC branch.
'test-log/EquivalenceOfLoggingAndTraversal': [PASS, FAIL],
@@ -61,15 +65,53 @@
# are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
'test-parsing/ParserSync': [PASS, NO_VARIANTS],
+ # BUG(2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FLAKY],
+
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
'test-api/Threading2': [PASS, ['mode == debug', SLOW]],
'test-api/Threading3': [PASS, ['mode == debug', SLOW]],
'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
+ 'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
##############################################################################
+['arch == arm64', {
+
+ 'test-api/Bug618': [PASS],
+
+ # BUG(v8:2999).
+ 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
+
+ # BUG(v8:3154).
+ 'test-heap/ReleaseOverReservedPages': [PASS, ['mode == debug', FAIL]],
+
+ # BUG(v8:3155).
+ 'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
+}], # 'arch == arm64'
+
+['arch == arm64 and simulator_run == True', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/ExternalArrays': [PASS, TIMEOUT],
+ 'test-api/Threading1': [SKIP],
+}], # 'arch == arm64 and simulator_run == True'
+
+['arch == arm64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'test-api/ExternalDoubleArray': [SKIP],
+ 'test-api/ExternalFloat32Array': [SKIP],
+ 'test-api/ExternalFloat64Array': [SKIP],
+ 'test-api/ExternalFloatArray': [SKIP],
+ 'test-api/Float32Array': [SKIP],
+ 'test-api/Float64Array': [SKIP],
+ 'test-debug/DebugBreakLoop': [SKIP],
+}], # 'arch == arm64 and mode == debug and simulator_run == True'
+
+##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -77,6 +119,13 @@
}], # 'asan == True'
##############################################################################
+# This should be 'nosnap == True': issue 3216 to add 'nosnap'.
+[ALWAYS, {
+ # BUG(3215)
+ 'test-lockers/MultithreadedParallelIsolates': [PASS, FAIL],
+}], # 'nosnap == True'
+
+##############################################################################
['system == windows', {
# BUG(2999).
@@ -102,12 +151,6 @@
##############################################################################
['arch == arm', {
- # We cannot assume that we can throw OutOfMemory exceptions in all situations.
- # Apparently our ARM box is in such a state. Skip the test as it also runs for
- # a long time.
- 'test-api/OutOfMemory': [SKIP],
- 'test-api/OutOfMemoryNested': [SKIP],
-
# BUG(355): Test crashes on ARM.
'test-log/ProfLazyMode': [SKIP],
@@ -117,9 +160,6 @@
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
- # BUG(2999).
- 'test-cpu-profiler/CollectCpuProfile': [PASS, FLAKY],
-
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, SLOW],
@@ -183,5 +223,8 @@
# BUG(2998).
'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [SKIP],
+
+ # BUG(3150).
+ 'test-api/PreCompileInvalidPreparseDataError': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
]
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index bda09f01a..daafb244e 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -174,6 +174,7 @@ static void XSetter(Local<Value> value, const Info& info, int offset) {
CHECK_EQ(x_holder, info.This());
CHECK_EQ(x_holder, info.Holder());
x_register[offset] = value->Int32Value();
+ info.GetReturnValue().Set(v8_num(-1));
}
@@ -210,20 +211,20 @@ THREADED_TEST(AccessorIC) {
"var key_1 = 'x1';"
"for (var j = 0; j < 10; j++) {"
" var i = 4*j;"
- " holder.x0 = i;"
+ " result.push(holder.x0 = i);"
" result.push(obj.x0);"
- " holder.x1 = i + 1;"
+ " result.push(holder.x1 = i + 1);"
" result.push(obj.x1);"
- " holder[key_0] = i + 2;"
+ " result.push(holder[key_0] = i + 2);"
" result.push(obj[key_0]);"
- " holder[key_1] = i + 3;"
+ " result.push(holder[key_1] = i + 3);"
" result.push(obj[key_1]);"
"}"
"result"));
- CHECK_EQ(40, array->Length());
- for (int i = 0; i < 40; i++) {
+ CHECK_EQ(80, array->Length());
+ for (int i = 0; i < 80; i++) {
v8::Handle<Value> entry = array->Get(v8::Integer::New(isolate, i));
- CHECK_EQ(v8::Integer::New(isolate, i), entry);
+ CHECK_EQ(v8::Integer::New(isolate, i/2), entry);
}
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index e58612705..5ee43d3e0 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -50,6 +50,7 @@
#include "unicode-inl.h"
#include "utils.h"
#include "vm-state.h"
+#include "../include/v8-util.h"
static const bool kLogThreading = false;
@@ -204,9 +205,8 @@ THREADED_TEST(Handles) {
CHECK(!undef.IsEmpty());
CHECK(undef->IsUndefined());
- const char* c_source = "1 + 2 + 3";
- Local<String> source = String::NewFromUtf8(CcTest::isolate(), c_source);
- Local<Script> script = Script::Compile(source);
+ const char* source = "1 + 2 + 3";
+ Local<Script> script = v8_compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
local_env->Exit();
@@ -445,9 +445,8 @@ THREADED_TEST(AccessElement) {
THREADED_TEST(Script) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- const char* c_source = "1 + 2 + 3";
- Local<String> source = String::NewFromUtf8(env->GetIsolate(), c_source);
- Local<Script> script = Script::Compile(source);
+ const char* source = "1 + 2 + 3";
+ Local<Script> script = v8_compile(source);
CHECK_EQ(6, script->Run()->Int32Value());
}
@@ -526,7 +525,7 @@ THREADED_TEST(ScriptUsingStringResource) {
v8::HandleScope scope(env->GetIsolate());
TestResource* resource = new TestResource(two_byte_source, &dispose_count);
Local<String> source = String::NewExternal(env->GetIsolate(), resource);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -562,7 +561,7 @@ THREADED_TEST(ScriptUsingAsciiStringResource) {
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ASCII_ENCODING, encoding);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -594,7 +593,7 @@ THREADED_TEST(ScriptMakingExternalString) {
bool success = source->MakeExternal(new TestResource(two_byte_source,
&dispose_count));
CHECK(success);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -620,7 +619,7 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -764,7 +763,7 @@ THREADED_TEST(UsingExternalString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -784,7 +783,7 @@ THREADED_TEST(UsingExternalAsciiString) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol =
- factory->InternalizedStringFromString(istring);
+ factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -871,7 +870,7 @@ TEST(ExternalStringWithDisposeHandling) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source = String::NewExternal(env->GetIsolate(), &res_stack);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -892,7 +891,7 @@ TEST(ExternalStringWithDisposeHandling) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source = String::NewExternal(env->GetIsolate(), res_heap);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
@@ -944,7 +943,7 @@ THREADED_TEST(StringConcat) {
env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
source = String::Concat(source, right);
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(68, value->Int32Value());
@@ -2397,23 +2396,23 @@ THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
env->Global()->Set(v8_str("obj"), bottom);
// Indexed and named get.
- Script::Compile(v8_str("obj[0]"))->Run();
- Script::Compile(v8_str("obj.x"))->Run();
+ CompileRun("obj[0]");
+ CompileRun("obj.x");
// Indexed and named set.
- Script::Compile(v8_str("obj[1] = 42"))->Run();
- Script::Compile(v8_str("obj.y = 42"))->Run();
+ CompileRun("obj[1] = 42");
+ CompileRun("obj.y = 42");
// Indexed and named query.
- Script::Compile(v8_str("0 in obj"))->Run();
- Script::Compile(v8_str("'x' in obj"))->Run();
+ CompileRun("0 in obj");
+ CompileRun("'x' in obj");
// Indexed and named deleter.
- Script::Compile(v8_str("delete obj[0]"))->Run();
- Script::Compile(v8_str("delete obj.x"))->Run();
+ CompileRun("delete obj[0]");
+ CompileRun("delete obj.x");
// Enumerators.
- Script::Compile(v8_str("for (var p in obj) ;"))->Run();
+ CompileRun("for (var p in obj) ;");
}
@@ -2444,13 +2443,12 @@ THREADED_TEST(PrePropertyHandler) {
0,
PrePropertyHandlerQuery);
LocalContext env(NULL, desc->InstanceTemplate());
- Script::Compile(v8_str(
- "var pre = 'Object: pre'; var on = 'Object: on';"))->Run();
- v8::Handle<Value> result_pre = Script::Compile(v8_str("pre"))->Run();
+ CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
+ v8::Handle<Value> result_pre = CompileRun("pre");
CHECK_EQ(v8_str("PrePropertyHandler: pre"), result_pre);
- v8::Handle<Value> result_on = Script::Compile(v8_str("on"))->Run();
+ v8::Handle<Value> result_on = CompileRun("on");
CHECK_EQ(v8_str("Object: on"), result_on);
- v8::Handle<Value> result_post = Script::Compile(v8_str("post"))->Run();
+ v8::Handle<Value> result_post = CompileRun("post");
CHECK(result_post.IsEmpty());
}
@@ -2458,8 +2456,7 @@ THREADED_TEST(PrePropertyHandler) {
THREADED_TEST(UndefinedIsNotEnumerable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> result = Script::Compile(v8_str(
- "this.propertyIsEnumerable(undefined)"))->Run();
+ v8::Handle<Value> result = CompileRun("this.propertyIsEnumerable(undefined)");
CHECK(result->IsFalse());
}
@@ -2512,7 +2509,7 @@ THREADED_TEST(DeepCrossLanguageRecursion) {
call_recursively_script = v8::Handle<Script>();
env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
- Script::Compile(v8_str("callFunctionRecursively()"))->Run();
+ CompileRun("callFunctionRecursively()");
}
@@ -2541,11 +2538,11 @@ THREADED_TEST(CallbackExceptionRegression) {
ThrowingPropertyHandlerSet);
LocalContext env;
env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<Value> otto = Script::Compile(v8_str(
- "try { with (obj) { otto; } } catch (e) { e; }"))->Run();
+ v8::Handle<Value> otto = CompileRun(
+ "try { with (obj) { otto; } } catch (e) { e; }");
CHECK_EQ(v8_str("otto"), otto);
- v8::Handle<Value> netto = Script::Compile(v8_str(
- "try { with (obj) { netto = 4; } } catch (e) { e; }"))->Run();
+ v8::Handle<Value> netto = CompileRun(
+ "try { with (obj) { netto = 4; } } catch (e) { e; }");
CHECK_EQ(v8_str("netto"), netto);
}
@@ -2557,7 +2554,7 @@ THREADED_TEST(FunctionPrototype) {
Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
LocalContext env;
env->Global()->Set(v8_str("Foo"), Foo->GetFunction());
- Local<Script> script = Script::Compile(v8_str("Foo.prototype.plak"));
+ Local<Script> script = v8_compile("Foo.prototype.plak");
CHECK_EQ(script->Run()->Int32Value(), 321);
}
@@ -2634,6 +2631,10 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
CheckAlignedPointerInInternalField(obj, huge);
+
+ v8::UniquePersistent<v8::Object> persistent(isolate, obj);
+ CHECK_EQ(1, Object::InternalFieldCount(persistent));
+ CHECK_EQ(huge, Object::GetAlignedPointerFromInternalField(persistent, 0));
}
@@ -2756,7 +2757,8 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
- v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, "my-symbol");
+ v8::Local<v8::Symbol> sym2 =
+ v8::Symbol::New(isolate, v8_str("my-symbol"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -2774,7 +2776,7 @@ THREADED_TEST(SymbolProperties) {
CHECK(!sym1->StrictEquals(sym2));
CHECK(!sym2->StrictEquals(sym1));
- CHECK(sym2->Name()->Equals(v8::String::NewFromUtf8(isolate, "my-symbol")));
+ CHECK(sym2->Name()->Equals(v8_str("my-symbol")));
v8::Local<v8::Value> sym_val = sym2;
CHECK(sym_val->IsSymbol());
@@ -2786,7 +2788,7 @@ THREADED_TEST(SymbolProperties) {
CHECK(sym_obj->IsSymbolObject());
CHECK(!sym2->IsSymbolObject());
CHECK(!obj->IsSymbolObject());
- CHECK(sym_obj->Equals(sym2));
+ CHECK(!sym_obj->Equals(sym2));
CHECK(!sym_obj->StrictEquals(sym2));
CHECK(v8::SymbolObject::Cast(*sym_obj)->Equals(sym_obj));
CHECK(v8::SymbolObject::Cast(*sym_obj)->ValueOf()->Equals(sym2));
@@ -2844,7 +2846,8 @@ THREADED_TEST(PrivateProperties) {
v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Local<v8::Private> priv1 = v8::Private::New(isolate);
- v8::Local<v8::Private> priv2 = v8::Private::New(isolate, "my-private");
+ v8::Local<v8::Private> priv2 =
+ v8::Private::New(isolate, v8_str("my-private"));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -2895,6 +2898,55 @@ THREADED_TEST(PrivateProperties) {
}
+THREADED_TEST(GlobalSymbols) {
+ i::FLAG_harmony_symbols = true;
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<String> name = v8_str("my-symbol");
+ v8::Local<v8::Symbol> glob = v8::Symbol::For(isolate, name);
+ v8::Local<v8::Symbol> glob2 = v8::Symbol::For(isolate, name);
+ CHECK(glob2->SameValue(glob));
+
+ v8::Local<v8::Symbol> glob_api = v8::Symbol::ForApi(isolate, name);
+ v8::Local<v8::Symbol> glob_api2 = v8::Symbol::ForApi(isolate, name);
+ CHECK(glob_api2->SameValue(glob_api));
+ CHECK(!glob_api->SameValue(glob));
+
+ v8::Local<v8::Symbol> sym = v8::Symbol::New(isolate, name);
+ CHECK(!sym->SameValue(glob));
+
+ CompileRun("var sym2 = Symbol.for('my-symbol')");
+ v8::Local<Value> sym2 = env->Global()->Get(v8_str("sym2"));
+ CHECK(sym2->SameValue(glob));
+ CHECK(!sym2->SameValue(glob_api));
+}
+
+
+THREADED_TEST(GlobalPrivates) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<String> name = v8_str("my-private");
+ v8::Local<v8::Private> glob = v8::Private::ForApi(isolate, name);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(obj->SetPrivate(glob, v8::Integer::New(isolate, 3)));
+
+ v8::Local<v8::Private> glob2 = v8::Private::ForApi(isolate, name);
+ CHECK(obj->HasPrivate(glob2));
+
+ v8::Local<v8::Private> priv = v8::Private::New(isolate, name);
+ CHECK(!obj->HasPrivate(priv));
+
+ CompileRun("var intern = %CreateGlobalPrivateSymbol('my-private')");
+ v8::Local<Value> intern = env->Global()->Get(v8_str("intern"));
+ CHECK(!obj->Has(intern));
+}
+
+
class ScopedArrayBufferContents {
public:
explicit ScopedArrayBufferContents(
@@ -3274,7 +3326,7 @@ THREADED_TEST(External) {
Local<v8::External> ext = v8::External::New(CcTest::isolate(), &x);
LocalContext env;
env->Global()->Set(v8_str("ext"), ext);
- Local<Value> reext_obj = Script::Compile(v8_str("this.ext"))->Run();
+ Local<Value> reext_obj = CompileRun("this.ext");
v8::Handle<v8::External> reext = reext_obj.As<v8::External>();
int* ptr = static_cast<int*>(reext->Value());
CHECK_EQ(x, 3);
@@ -3443,6 +3495,89 @@ THREADED_TEST(UniquePersistent) {
}
+template<typename K, typename V>
+class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
+ public:
+ typedef typename v8::DefaultPersistentValueMapTraits<K, V>::Impl Impl;
+ static const bool kIsWeak = true;
+ struct WeakCallbackDataType {
+ Impl* impl;
+ K key;
+ };
+ static WeakCallbackDataType* WeakCallbackParameter(
+ Impl* impl, const K& key, Local<V> value) {
+ WeakCallbackDataType* data = new WeakCallbackDataType;
+ data->impl = impl;
+ data->key = key;
+ return data;
+ }
+ static Impl* ImplFromWeakCallbackData(
+ const v8::WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return data.GetParameter()->impl;
+ }
+ static K KeyFromWeakCallbackData(
+ const v8::WeakCallbackData<V, WeakCallbackDataType>& data) {
+ return data.GetParameter()->key;
+ }
+ static void DisposeCallbackData(WeakCallbackDataType* data) {
+ delete data;
+ }
+ static void Dispose(v8::Isolate* isolate, v8::UniquePersistent<V> value,
+ Impl* impl, K key) { }
+};
+
+
+template<typename Map>
+static void TestPersistentValueMap() {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Map map(isolate);
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->global_handles_count();
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ {
+ HandleScope scope(isolate);
+ Local<v8::Object> obj = map.Get(7);
+ CHECK(obj.IsEmpty());
+ Local<v8::Object> expected = v8::Object::New(isolate);
+ map.Set(7, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ obj = map.Get(7);
+ CHECK_EQ(expected, obj);
+ v8::UniquePersistent<v8::Object> removed = map.Remove(7);
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK(expected == removed);
+ removed = map.Remove(7);
+ CHECK(removed.IsEmpty());
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ }
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ if (map.IsWeak()) {
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->heap()->
+ CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ } else {
+ map.Clear();
+ }
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+}
+
+
+TEST(PersistentValueMap) {
+ // Default case, w/o weak callbacks:
+ TestPersistentValueMap<v8::StdPersistentValueMap<int, v8::Object> >();
+
+ // Custom traits with weak callbacks:
+ typedef v8::StdPersistentValueMap<int, v8::Object,
+ WeakStdMapTraits<int, v8::Object> > WeakPersistentValueMap;
+ TestPersistentValueMap<WeakPersistentValueMap>();
+}
+
+
THREADED_TEST(GlobalHandleUpcast) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -3945,7 +4080,7 @@ TEST(ApiObjectGroupsCycleForScavenger) {
THREADED_TEST(ScriptException) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<Script> script = Script::Compile(v8_str("throw 'panama!';"));
+ Local<Script> script = v8_compile("throw 'panama!';");
v8::TryCatch try_catch;
Local<Value> result = script->Run();
CHECK(result.IsEmpty());
@@ -3974,7 +4109,6 @@ static void check_message_0(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK_EQ(5.76, data->NumberValue());
CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
- CHECK_EQ(7.56, message->GetScriptData()->NumberValue());
CHECK(!message->IsSharedCrossOrigin());
message_received = true;
}
@@ -3986,11 +4120,7 @@ THREADED_TEST(MessageHandler0) {
CHECK(!message_received);
LocalContext context;
v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"));
- v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
- &origin);
- script->SetData(v8_str("7.56"));
+ v8::Handle<v8::Script> script = CompileWithOrigin("throw 'error'", "6.75");
script->Run();
CHECK(message_received);
// clear out the message listener
@@ -4166,13 +4296,13 @@ THREADED_TEST(GetSetProperty) {
context->Global()->Set(v8_str("12"), v8_num(92));
context->Global()->Set(v8::Integer::New(isolate, 16), v8_num(32));
context->Global()->Set(v8_num(13), v8_num(56));
- Local<Value> foo = Script::Compile(v8_str("this.foo"))->Run();
+ Local<Value> foo = CompileRun("this.foo");
CHECK_EQ(14, foo->Int32Value());
- Local<Value> twelve = Script::Compile(v8_str("this[12]"))->Run();
+ Local<Value> twelve = CompileRun("this[12]");
CHECK_EQ(92, twelve->Int32Value());
- Local<Value> sixteen = Script::Compile(v8_str("this[16]"))->Run();
+ Local<Value> sixteen = CompileRun("this[16]");
CHECK_EQ(32, sixteen->Int32Value());
- Local<Value> thirteen = Script::Compile(v8_str("this[13]"))->Run();
+ Local<Value> thirteen = CompileRun("this[13]");
CHECK_EQ(56, thirteen->Int32Value());
CHECK_EQ(92,
context->Global()->Get(v8::Integer::New(isolate, 12))->Int32Value());
@@ -4201,7 +4331,7 @@ THREADED_TEST(PropertyAttributes) {
context->Global()->Set(prop, v8_num(7), v8::ReadOnly);
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
CHECK_EQ(v8::ReadOnly, context->Global()->GetPropertyAttributes(prop));
- Script::Compile(v8_str("read_only = 9"))->Run();
+ CompileRun("read_only = 9");
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
context->Global()->Set(prop, v8_num(10));
CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
@@ -4209,7 +4339,7 @@ THREADED_TEST(PropertyAttributes) {
prop = v8_str("dont_delete");
context->Global()->Set(prop, v8_num(13), v8::DontDelete);
CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
- Script::Compile(v8_str("delete dont_delete"))->Run();
+ CompileRun("delete dont_delete");
CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
CHECK_EQ(v8::DontDelete, context->Global()->GetPropertyAttributes(prop));
// dont-enum
@@ -4248,7 +4378,7 @@ THREADED_TEST(Array) {
CHECK(!array->Has(1));
CHECK(array->Has(2));
CHECK_EQ(7, array->Get(2)->Int32Value());
- Local<Value> obj = Script::Compile(v8_str("[1, 2, 3]"))->Run();
+ Local<Value> obj = CompileRun("[1, 2, 3]");
Local<v8::Array> arr = obj.As<v8::Array>();
CHECK_EQ(3, arr->Length());
CHECK_EQ(1, arr->Get(0)->Int32Value());
@@ -4399,113 +4529,6 @@ THREADED_TEST(FunctionCall) {
}
-static const char* js_code_causing_out_of_memory =
- "var a = new Array(); while(true) a.push(a);";
-
-
-// These tests run for a long time and prevent us from running tests
-// that come after them so they cannot run in parallel.
-TEST(OutOfMemory) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(5 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
-
- // Execute a script that causes out of memory.
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::V8::IgnoreOutOfMemoryException();
- Local<Script> script = Script::Compile(String::NewFromUtf8(
- context->GetIsolate(), js_code_causing_out_of_memory));
- Local<Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
-void ProvokeOutOfMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
-
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Local<Script> script = Script::Compile(String::NewFromUtf8(
- context->GetIsolate(), js_code_causing_out_of_memory));
- Local<Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-
- args.GetReturnValue().Set(result);
-}
-
-
-TEST(OutOfMemoryNested) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(5 * K * K);
- v8::Isolate* isolate = CcTest::isolate();
- v8::SetResourceConstraints(isolate, &constraints);
-
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("ProvokeOutOfMemory"),
- v8::FunctionTemplate::New(isolate, ProvokeOutOfMemory));
- LocalContext context(0, templ);
- v8::V8::IgnoreOutOfMemoryException();
- Local<Value> result = CompileRun(
- "var thrown = false;"
- "try {"
- " ProvokeOutOfMemory();"
- "} catch (e) {"
- " thrown = true;"
- "}");
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
-void OOMCallback(const char* location, const char* message) {
- exit(0);
-}
-
-
-TEST(HugeConsStringOutOfMemory) {
- // It's not possible to read a snapshot into a heap with different dimensions.
- if (i::Snapshot::IsEnabled()) return;
- // Set heap limits.
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
-
- // Execute a script that causes out of memory.
- v8::V8::SetFatalErrorHandler(OOMCallback);
-
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- // Build huge string. This should fail with out of memory exception.
- CompileRun(
- "var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
- "for (var i = 0; i < 22; i++) { str = str + str; }");
-
- CHECK(false); // Should not return.
-}
-
-
THREADED_TEST(ConstructCall) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -4734,7 +4757,7 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
v8::HandleScope scope(args.GetIsolate());
v8::TryCatch try_catch;
- Local<Value> result = v8::Script::Compile(args[0]->ToString())->Run();
+ Local<Value> result = CompileRun(args[0]->ToString());
CHECK(!try_catch.HasCaught() || result.IsEmpty());
args.GetReturnValue().Set(try_catch.HasCaught());
}
@@ -4996,9 +5019,7 @@ THREADED_TEST(ExternalScriptException) {
LocalContext context(0, templ);
v8::TryCatch try_catch;
- Local<Script> script
- = Script::Compile(v8_str("ThrowFromC(); throw 'panama';"));
- Local<Value> result = script->Run();
+ Local<Value> result = CompileRun("ThrowFromC(); throw 'panama';");
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
@@ -5190,12 +5211,12 @@ THREADED_TEST(CatchZero) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("throw 10"))->Run();
+ CompileRun("throw 10");
CHECK(try_catch.HasCaught());
CHECK_EQ(10, try_catch.Exception()->Int32Value());
try_catch.Reset();
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("throw 0"))->Run();
+ CompileRun("throw 0");
CHECK(try_catch.HasCaught());
CHECK_EQ(0, try_catch.Exception()->Int32Value());
}
@@ -5206,7 +5227,7 @@ THREADED_TEST(CatchExceptionFromWith) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("var o = {}; with (o) { throw 42; }"))->Run();
+ CompileRun("var o = {}; with (o) { throw 42; }");
CHECK(try_catch.HasCaught());
}
@@ -5358,7 +5379,7 @@ THREADED_TEST(Equality) {
THREADED_TEST(MultiRun) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Local<Script> script = Script::Compile(v8_str("x"));
+ Local<Script> script = v8_compile("x");
for (int i = 0; i < 10; i++)
script->Run();
}
@@ -5380,7 +5401,7 @@ THREADED_TEST(SimplePropertyRead) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x"));
+ Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5397,19 +5418,19 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
context->Global()->Set(v8_str("obj"), templ->NewInstance());
// Uses getOwnPropertyDescriptor to check the configurable status
- Local<Script> script_desc
- = Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor( "
- "obj, 'x');"
- "prop.configurable;"));
+ Local<Script> script_desc = v8_compile(
+ "var prop = Object.getOwnPropertyDescriptor( "
+ "obj, 'x');"
+ "prop.configurable;");
Local<Value> result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), true);
// Redefine get - but still configurable
- Local<Script> script_define
- = Script::Compile(v8_str("var desc = { get: function(){return 42; },"
- " configurable: true };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ Local<Script> script_define = v8_compile(
+ "var desc = { get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(42));
@@ -5418,11 +5439,11 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
CHECK_EQ(result->BooleanValue(), true);
// Redefine to a non-configurable
- script_define
- = Script::Compile(v8_str("var desc = { get: function(){return 43; },"
- " configurable: false };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ script_define = v8_compile(
+ "var desc = { get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(43));
result = script_desc->Run();
@@ -5445,18 +5466,19 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script_desc = Script::Compile(v8_str("var prop ="
- "Object.getOwnPropertyDescriptor( "
- "obj, 'x');"
- "prop.configurable;"));
+ Local<Script> script_desc = v8_compile(
+ "var prop ="
+ "Object.getOwnPropertyDescriptor( "
+ "obj, 'x');"
+ "prop.configurable;");
Local<Value> result = script_desc->Run();
CHECK_EQ(result->BooleanValue(), true);
- Local<Script> script_define =
- Script::Compile(v8_str("var desc = {get: function(){return 42; },"
- " configurable: true };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ Local<Script> script_define = v8_compile(
+ "var desc = {get: function(){return 42; },"
+ " configurable: true };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(42));
@@ -5465,11 +5487,11 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
CHECK_EQ(result->BooleanValue(), true);
- script_define =
- Script::Compile(v8_str("var desc = {get: function(){return 43; },"
- " configurable: false };"
- "Object.defineProperty(obj, 'x', desc);"
- "obj.x"));
+ script_define = v8_compile(
+ "var desc = {get: function(){return 43; },"
+ " configurable: false };"
+ "Object.defineProperty(obj, 'x', desc);"
+ "obj.x");
result = script_define->Run();
CHECK_EQ(result, v8_num(43));
result = script_desc->Run();
@@ -5668,7 +5690,7 @@ THREADED_TEST(SimplePropertyWrite) {
templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4"));
+ Local<Script> script = v8_compile("obj.x = 4");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
@@ -5685,7 +5707,7 @@ THREADED_TEST(SetterOnly) {
templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
@@ -5705,7 +5727,7 @@ THREADED_TEST(NoAccessors) {
v8_str("donut"));
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x = 4; obj.x"));
+ Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
script->Run();
}
@@ -5727,7 +5749,7 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
templ->SetNamedPropertyHandler(XPropertyGetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("obj.x"));
+ Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5743,7 +5765,7 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
LocalContext context;
// Create an object with a named interceptor.
context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
- Local<Script> script = Script::Compile(v8_str("interceptor_obj.x"));
+ Local<Script> script = v8_compile("interceptor_obj.x");
for (int i = 0; i < 10; i++) {
Local<Value> result = script->Run();
CHECK_EQ(result, v8_str("x"));
@@ -5866,18 +5888,18 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
IndexedPropertySetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> getter_script = Script::Compile(v8_str(
- "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
- Local<Script> setter_script = Script::Compile(v8_str(
+ Local<Script> getter_script = v8_compile(
+ "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];");
+ Local<Script> setter_script = v8_compile(
"obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
"obj[17] = 23;"
- "obj.foo;"));
- Local<Script> interceptor_setter_script = Script::Compile(v8_str(
+ "obj.foo;");
+ Local<Script> interceptor_setter_script = v8_compile(
"obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
"obj[39] = 47;"
- "obj.foo;")); // This setter should not run, due to the interceptor.
- Local<Script> interceptor_getter_script = Script::Compile(v8_str(
- "obj[37];"));
+ "obj.foo;"); // This setter should not run, due to the interceptor.
+ Local<Script> interceptor_getter_script = v8_compile(
+ "obj[37];");
Local<Value> result = getter_script->Run();
CHECK_EQ(v8_num(5), result);
result = setter_script->Run();
@@ -5913,10 +5935,10 @@ static void UnboxedDoubleIndexedPropertySetter(
void UnboxedDoubleIndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
// Force the list of returned keys to be stored in a FastDoubleArray.
- Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+ Local<Script> indexed_property_names_script = v8_compile(
"keys = new Array(); keys[125000] = 1;"
"for(i = 0; i < 80000; i++) { keys[i] = i; };"
- "keys.length = 25; keys;"));
+ "keys.length = 25; keys;");
Local<Value> result = indexed_property_names_script->Run();
info.GetReturnValue().Set(Local<v8::Array>::Cast(result));
}
@@ -5936,29 +5958,28 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
// When obj is created, force it to be Stored in a FastDoubleArray.
- Local<Script> create_unboxed_double_script = Script::Compile(v8_str(
+ Local<Script> create_unboxed_double_script = v8_compile(
"obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
"key_count = 0; "
"for (x in obj) {key_count++;};"
- "obj;"));
+ "obj;");
Local<Value> result = create_unboxed_double_script->Run();
CHECK(result->ToObject()->HasRealIndexedProperty(2000));
- Local<Script> key_count_check = Script::Compile(v8_str(
- "key_count;"));
+ Local<Script> key_count_check = v8_compile("key_count;");
result = key_count_check->Run();
CHECK_EQ(v8_num(40013), result);
}
-void NonStrictArgsIndexedPropertyEnumerator(
+void SloppyArgsIndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
// Force the list of returned keys to be stored in an Arguments object.
- Local<Script> indexed_property_names_script = Script::Compile(v8_str(
+ Local<Script> indexed_property_names_script = v8_compile(
"function f(w,x) {"
" return arguments;"
"}"
"keys = f(0, 1, 2, 3);"
- "keys;"));
+ "keys;");
Local<Object> result =
Local<Object>::Cast(indexed_property_names_script->Run());
// Have to populate the handle manually, as it's not Cast-able.
@@ -5969,7 +5990,7 @@ void NonStrictArgsIndexedPropertyEnumerator(
}
-static void NonStrictIndexedPropertyGetter(
+static void SloppyIndexedPropertyGetter(
uint32_t index,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
@@ -5981,21 +6002,20 @@ static void NonStrictIndexedPropertyGetter(
// Make sure that the interceptor code in the runtime properly handles
// merging property name lists for non-string arguments arrays.
-THREADED_TEST(IndexedInterceptorNonStrictArgsWithIndexedAccessor) {
+THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetIndexedPropertyHandler(NonStrictIndexedPropertyGetter,
+ templ->SetIndexedPropertyHandler(SloppyIndexedPropertyGetter,
0,
0,
0,
- NonStrictArgsIndexedPropertyEnumerator);
+ SloppyArgsIndexedPropertyEnumerator);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
- Local<Script> create_args_script =
- Script::Compile(v8_str(
- "var key_count = 0;"
- "for (x in obj) {key_count++;} key_count;"));
+ Local<Script> create_args_script = v8_compile(
+ "var key_count = 0;"
+ "for (x in obj) {key_count++;} key_count;");
Local<Value> result = create_args_script->Run();
CHECK_EQ(v8_num(4), result);
}
@@ -6370,11 +6390,11 @@ THREADED_TEST(Regress892105) {
"8901");
LocalContext env0;
- Local<Script> script0 = Script::Compile(source);
+ Local<Script> script0 = v8_compile(source);
CHECK_EQ(8901.0, script0->Run()->NumberValue());
LocalContext env1;
- Local<Script> script1 = Script::Compile(source);
+ Local<Script> script1 = v8_compile(source);
CHECK_EQ(8901.0, script1->Run()->NumberValue());
}
@@ -6481,19 +6501,19 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<String> source = v8_str("undetectable.x = 42;"
"undetectable.x");
- Local<Script> script = Script::Compile(source);
+ Local<Script> script = v8_compile(source);
CHECK_EQ(v8::Integer::New(isolate, 42), script->Run());
ExpectBoolean("Object.isExtensible(undetectable)", true);
source = v8_str("Object.preventExtensions(undetectable);");
- script = Script::Compile(source);
+ script = v8_compile(source);
script->Run();
ExpectBoolean("Object.isExtensible(undetectable)", false);
source = v8_str("undetectable.y = 2000;");
- script = Script::Compile(source);
+ script = v8_compile(source);
script->Run();
ExpectBoolean("undetectable.y == undefined", true);
}
@@ -6586,7 +6606,7 @@ TEST(PersistentHandles) {
Local<String> str = v8_str("foo");
v8::Persistent<String> p_str(isolate, str);
p_str.Reset();
- Local<Script> scr = Script::Compile(v8_str(""));
+ Local<Script> scr = v8_compile("");
v8::Persistent<Script> p_scr(isolate, scr);
p_scr.Reset();
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
@@ -6609,7 +6629,7 @@ THREADED_TEST(GlobalObjectTemplate) {
v8::FunctionTemplate::New(isolate, HandleLogDelegator));
v8::Local<Context> context = Context::New(isolate, 0, global_template);
Context::Scope context_scope(context);
- Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
+ CompileRun("JSNI_Log('LOG')");
}
@@ -6627,7 +6647,7 @@ TEST(SimpleExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
+ v8::Handle<Value> result = CompileRun("Foo()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6640,7 +6660,7 @@ TEST(NullExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
+ v8::Handle<Value> result = CompileRun("1+3");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6678,7 +6698,7 @@ TEST(ExtensionWithSourceLength) {
Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
+ v8::Handle<Value> result = CompileRun("Ret54321()");
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 54321), result);
} else {
// Anything but exactly the right length should fail to compile.
@@ -6714,9 +6734,9 @@ TEST(UseEvalFromExtension) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
+ v8::Handle<Value> result = CompileRun("UseEval1()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
- result = Script::Compile(v8_str("UseEval2()"))->Run();
+ result = CompileRun("UseEval2()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
@@ -6748,9 +6768,9 @@ TEST(UseWithFromExtension) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
+ v8::Handle<Value> result = CompileRun("UseWith1()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
- result = Script::Compile(v8_str("UseWith2()"))->Run();
+ result = CompileRun("UseWith2()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 87));
}
@@ -6763,7 +6783,7 @@ TEST(AutoExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate());
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
+ v8::Handle<Value> result = CompileRun("Foo()");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 4));
}
@@ -6823,7 +6843,7 @@ TEST(NativeCallInExtensions) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
+ v8::Handle<Value> result = CompileRun(kNativeCallTest);
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 3));
}
@@ -6860,7 +6880,7 @@ TEST(NativeFunctionDeclaration) {
v8::Handle<Context> context =
Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
+ v8::Handle<Value> result = CompileRun("foo(42);");
CHECK_EQ(result, v8::Integer::New(CcTest::isolate(), 42));
}
@@ -6991,11 +7011,11 @@ THREADED_TEST(FunctionLookup) {
LocalContext context(&config);
CHECK_EQ(3, lookup_count);
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- Script::Compile(v8_str("Foo(0)"))->Run());
+ CompileRun("Foo(0)"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- Script::Compile(v8_str("Foo(1)"))->Run());
+ CompileRun("Foo(1)"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- Script::Compile(v8_str("Foo(2)"))->Run());
+ CompileRun("Foo(2)"));
}
@@ -7009,11 +7029,11 @@ THREADED_TEST(NativeFunctionConstructCall) {
// Run a few times to ensure that allocation of objects doesn't
// change behavior of a constructor function.
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 8),
- Script::Compile(v8_str("(new A()).data"))->Run());
+ CompileRun("(new A()).data"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 7),
- Script::Compile(v8_str("(new B()).data"))->Run());
+ CompileRun("(new B()).data"));
CHECK_EQ(v8::Integer::New(CcTest::isolate(), 6),
- Script::Compile(v8_str("(new C()).data"))->Run());
+ CompileRun("(new C()).data"));
}
}
@@ -7059,7 +7079,7 @@ THREADED_TEST(ErrorWithMissingScriptInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::V8::AddMessageListener(MissingScriptInfoMessageListener);
- Script::Compile(v8_str("throw Error()"))->Run();
+ CompileRun("throw Error()");
v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener);
}
@@ -7099,14 +7119,14 @@ THREADED_TEST(IndependentWeakHandle) {
object_a.handle.MarkIndependent();
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK(object_a.flag);
CHECK(object_b.flag);
}
static void InvokeScavenge() {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
@@ -7188,7 +7208,7 @@ THREADED_TEST(IndependentHandleRevival) {
object.flag = false;
object.handle.SetWeak(&object, &RevivingCallback);
object.handle.MarkIndependent();
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK(object.flag);
CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
{
@@ -7480,7 +7500,7 @@ THREADED_TEST(ObjectInstantiation) {
CHECK_NE(obj, context->Global()->Get(v8_str("o")));
context->Global()->Set(v8_str("o2"), obj);
v8::Handle<Value> value =
- Script::Compile(v8_str("o.__proto__ === o2.__proto__"))->Run();
+ CompileRun("o.__proto__ === o2.__proto__");
CHECK_EQ(v8::True(isolate), value);
context->Global()->Set(v8_str("o"), obj);
}
@@ -8241,13 +8261,14 @@ TEST(ApiUncaughtException) {
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
- Script::Compile(v8_str("function trouble_callee() {"
- " var x = null;"
- " return x.foo;"
- "};"
- "function trouble_caller() {"
- " trouble();"
- "};"))->Run();
+ CompileRun(
+ "function trouble_callee() {"
+ " var x = null;"
+ " return x.foo;"
+ "};"
+ "function trouble_caller() {"
+ " trouble();"
+ "};");
Local<Value> trouble = global->Get(v8_str("trouble"));
CHECK(trouble->IsFunction());
Local<Value> trouble_callee = global->Get(v8_str("trouble_callee"));
@@ -8283,13 +8304,12 @@ TEST(ExceptionInNativeScript) {
v8::Local<v8::Object> global = env->Global();
global->Set(v8_str("trouble"), fun->GetFunction());
- Script::Compile(
- v8_str(
- "function trouble() {\n"
- " var o = {};\n"
- " new o.foo();\n"
- "};"),
- v8::String::NewFromUtf8(isolate, script_resource_name))->Run();
+ CompileRunWithOrigin(
+ "function trouble() {\n"
+ " var o = {};\n"
+ " new o.foo();\n"
+ "};",
+ script_resource_name);
Local<Value> trouble = global->Get(v8_str("trouble"));
CHECK(trouble->IsFunction());
Function::Cast(*trouble)->Call(global, 0, NULL);
@@ -8301,7 +8321,7 @@ TEST(CompilationErrorUsingTryCatchHandler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::TryCatch try_catch;
- Script::Compile(v8_str("This doesn't &*&@#$&*^ compile."));
+ v8_compile("This doesn't &*&@#$&*^ compile.");
CHECK_NE(NULL, *try_catch.Exception());
CHECK(try_catch.HasCaught());
}
@@ -8311,18 +8331,20 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::TryCatch try_catch;
- Script::Compile(v8_str("try { throw ''; } catch (e) {}"))->Run();
+ CompileRun("try { throw ''; } catch (e) {}");
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("try { throw ''; } finally {}"))->Run();
+ CompileRun("try { throw ''; } finally {}");
CHECK(try_catch.HasCaught());
try_catch.Reset();
- Script::Compile(v8_str("(function() {"
- "try { throw ''; } finally { return; }"
- "})()"))->Run();
+ CompileRun(
+ "(function() {"
+ "try { throw ''; } finally { return; }"
+ "})()");
CHECK(!try_catch.HasCaught());
- Script::Compile(v8_str("(function()"
- " { try { throw ''; } finally { throw 0; }"
- "})()"))->Run();
+ CompileRun(
+ "(function()"
+ " { try { throw ''; } finally { throw 0; }"
+ "})()");
CHECK(try_catch.HasCaught());
}
@@ -8400,12 +8422,12 @@ THREADED_TEST(SecurityChecks) {
env1->SetSecurityToken(foo);
// Create a function in env1.
- Script::Compile(v8_str("spy=function(){return spy;}"))->Run();
+ CompileRun("spy=function(){return spy;}");
Local<Value> spy = env1->Global()->Get(v8_str("spy"));
CHECK(spy->IsFunction());
// Create another function accessing global objects.
- Script::Compile(v8_str("spy2=function(){return new this.Array();}"))->Run();
+ CompileRun("spy2=function(){return new this.Array();}");
Local<Value> spy2 = env1->Global()->Get(v8_str("spy2"));
CHECK(spy2->IsFunction());
@@ -8518,7 +8540,7 @@ THREADED_TEST(CrossDomainDelete) {
{
Context::Scope scope_env2(env2);
Local<Value> result =
- Script::Compile(v8_str("delete env1.prop"))->Run();
+ CompileRun("delete env1.prop");
CHECK(result->IsFalse());
}
@@ -8548,7 +8570,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
Local<String> test = v8_str("propertyIsEnumerable.call(env1, 'prop')");
{
Context::Scope scope_env2(env2);
- Local<Value> result = Script::Compile(test)->Run();
+ Local<Value> result = CompileRun(test);
CHECK(result->IsTrue());
}
@@ -8556,7 +8578,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
env2->SetSecurityToken(bar);
{
Context::Scope scope_env2(env2);
- Local<Value> result = Script::Compile(test)->Run();
+ Local<Value> result = CompileRun(test);
CHECK(result->IsFalse());
}
}
@@ -9768,10 +9790,10 @@ THREADED_TEST(InstanceProperties) {
Local<Value> o = t->GetFunction()->NewInstance();
context->Global()->Set(v8_str("i"), o);
- Local<Value> value = Script::Compile(v8_str("i.x"))->Run();
+ Local<Value> value = CompileRun("i.x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("i.f()"))->Run();
+ value = CompileRun("i.f()");
CHECK_EQ(12, value->Int32Value());
}
@@ -9820,22 +9842,22 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
// environment initialization.
global_object = env->Global();
- Local<Value> value = Script::Compile(v8_str("x"))->Run();
+ Local<Value> value = CompileRun("x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str(script))->Run();
+ value = CompileRun(script);
CHECK_EQ(1, value->Int32Value());
}
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
- Local<Value> value = Script::Compile(v8_str("x"))->Run();
+ Local<Value> value = CompileRun("x");
CHECK_EQ(42, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str(script))->Run();
+ value = CompileRun(script);
CHECK_EQ(1, value->Int32Value());
}
}
@@ -9870,14 +9892,14 @@ THREADED_TEST(CallKnownGlobalReceiver) {
// Hold on to the global object so it can be used again in another
// environment initialization.
global_object = env->Global();
- foo = Script::Compile(v8_str(script))->Run();
+ foo = CompileRun(script);
}
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
env->Global()->Set(v8_str("foo"), foo);
- Script::Compile(v8_str("foo()"))->Run();
+ CompileRun("foo()");
}
}
@@ -9946,19 +9968,19 @@ THREADED_TEST(ShadowObject) {
context->Global()->Set(v8_str("__proto__"), o);
Local<Value> value =
- Script::Compile(v8_str("this.propertyIsEnumerable(0)"))->Run();
+ CompileRun("this.propertyIsEnumerable(0)");
CHECK(value->IsBoolean());
CHECK(!value->BooleanValue());
- value = Script::Compile(v8_str("x"))->Run();
+ value = CompileRun("x");
CHECK_EQ(12, value->Int32Value());
- value = Script::Compile(v8_str("f()"))->Run();
+ value = CompileRun("f()");
CHECK_EQ(42, value->Int32Value());
- Script::Compile(v8_str("y = 43"))->Run();
+ CompileRun("y = 43");
CHECK_EQ(1, shadow_y_setter_call_count);
- value = Script::Compile(v8_str("y"))->Run();
+ value = CompileRun("y");
CHECK_EQ(1, shadow_y_getter_call_count);
CHECK_EQ(42, value->Int32Value());
}
@@ -10217,10 +10239,11 @@ THREADED_TEST(Regress269562) {
Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
CHECK(o2->SetPrototype(o1));
- v8::Local<v8::Symbol> sym = v8::Symbol::New(context->GetIsolate(), "s1");
+ v8::Local<v8::Symbol> sym =
+ v8::Symbol::New(context->GetIsolate(), v8_str("s1"));
o1->Set(sym, v8_num(3));
- o1->SetHiddenValue(v8_str("h1"),
- v8::Integer::New(context->GetIsolate(), 2013));
+ o1->SetHiddenValue(
+ v8_str("h1"), v8::Integer::New(context->GetIsolate(), 2013));
// Call the runtime version of GetLocalPropertyNames() on
// the natively created object through JavaScript.
@@ -10582,29 +10605,29 @@ THREADED_TEST(EvalAliasedDynamic) {
v8::HandleScope scope(current->GetIsolate());
// Tests where aliased eval can only be resolved dynamically.
- Local<Script> script =
- Script::Compile(v8_str("function f(x) { "
- " var foo = 2;"
- " with (x) { return eval('foo'); }"
- "}"
- "foo = 0;"
- "result1 = f(new Object());"
- "result2 = f(this);"
- "var x = new Object();"
- "x.eval = function(x) { return 1; };"
- "result3 = f(x);"));
+ Local<Script> script = v8_compile(
+ "function f(x) { "
+ " var foo = 2;"
+ " with (x) { return eval('foo'); }"
+ "}"
+ "foo = 0;"
+ "result1 = f(new Object());"
+ "result2 = f(this);"
+ "var x = new Object();"
+ "x.eval = function(x) { return 1; };"
+ "result3 = f(x);");
script->Run();
CHECK_EQ(2, current->Global()->Get(v8_str("result1"))->Int32Value());
CHECK_EQ(0, current->Global()->Get(v8_str("result2"))->Int32Value());
CHECK_EQ(1, current->Global()->Get(v8_str("result3"))->Int32Value());
v8::TryCatch try_catch;
- script =
- Script::Compile(v8_str("function f(x) { "
- " var bar = 2;"
- " with (x) { return eval('bar'); }"
- "}"
- "result4 = f(this)"));
+ script = v8_compile(
+ "function f(x) { "
+ " var bar = 2;"
+ " with (x) { return eval('bar'); }"
+ "}"
+ "result4 = f(this)");
script->Run();
CHECK(!try_catch.HasCaught());
CHECK_EQ(2, current->Global()->Get(v8_str("result4"))->Int32Value());
@@ -10626,8 +10649,7 @@ THREADED_TEST(CrossEval) {
current->Global()->Set(v8_str("other"), other->Global());
// Check that new variables are introduced in other context.
- Local<Script> script =
- Script::Compile(v8_str("other.eval('var foo = 1234')"));
+ Local<Script> script = v8_compile("other.eval('var foo = 1234')");
script->Run();
Local<Value> foo = other->Global()->Get(v8_str("foo"));
CHECK_EQ(1234, foo->Int32Value());
@@ -10635,8 +10657,7 @@ THREADED_TEST(CrossEval) {
// Check that writing to non-existing properties introduces them in
// the other context.
- script =
- Script::Compile(v8_str("other.eval('na = 1234')"));
+ script = v8_compile("other.eval('na = 1234')");
script->Run();
CHECK_EQ(1234, other->Global()->Get(v8_str("na"))->Int32Value());
CHECK(!current->Global()->Has(v8_str("na")));
@@ -10644,19 +10665,18 @@ THREADED_TEST(CrossEval) {
// Check that global variables in current context are not visible in other
// context.
v8::TryCatch try_catch;
- script =
- Script::Compile(v8_str("var bar = 42; other.eval('bar');"));
+ script = v8_compile("var bar = 42; other.eval('bar');");
Local<Value> result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that local variables in current context are not visible in other
// context.
- script =
- Script::Compile(v8_str("(function() { "
- " var baz = 87;"
- " return other.eval('baz');"
- "})();"));
+ script = v8_compile(
+ "(function() { "
+ " var baz = 87;"
+ " return other.eval('baz');"
+ "})();");
result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -10664,30 +10684,28 @@ THREADED_TEST(CrossEval) {
// Check that global variables in the other environment are visible
// when evaluting code.
other->Global()->Set(v8_str("bis"), v8_num(1234));
- script = Script::Compile(v8_str("other.eval('bis')"));
+ script = v8_compile("other.eval('bis')");
CHECK_EQ(1234, script->Run()->Int32Value());
CHECK(!try_catch.HasCaught());
// Check that the 'this' pointer points to the global object evaluating
// code.
other->Global()->Set(v8_str("t"), other->Global());
- script = Script::Compile(v8_str("other.eval('this == t')"));
+ script = v8_compile("other.eval('this == t')");
result = script->Run();
CHECK(result->IsTrue());
CHECK(!try_catch.HasCaught());
// Check that variables introduced in with-statement are not visible in
// other context.
- script =
- Script::Compile(v8_str("with({x:2}){other.eval('x')}"));
+ script = v8_compile("with({x:2}){other.eval('x')}");
result = script->Run();
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that you cannot use 'eval.call' with another object than the
// current global object.
- script =
- Script::Compile(v8_str("other.y = 1; eval.call(other, 'y')"));
+ script = v8_compile("other.y = 1; eval.call(other, 'y')");
result = script->Run();
CHECK(try_catch.HasCaught());
}
@@ -10742,8 +10760,7 @@ THREADED_TEST(CrossLazyLoad) {
current->Global()->Set(v8_str("other"), other->Global());
// Trigger lazy loading in other context.
- Local<Script> script =
- Script::Compile(v8_str("other.eval('new Date(42)')"));
+ Local<Script> script = v8_compile("other.eval('new Date(42)')");
Local<Value> value = script->Run();
CHECK_EQ(42.0, value->NumberValue());
}
@@ -12929,7 +12946,6 @@ static void ChildGetter(Local<String> name,
THREADED_TEST(Overriding) {
- i::FLAG_es5_readonly = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -14123,7 +14139,8 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
// have remnants of state from other code.
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
- i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Heap* heap = i_isolate->heap();
{
v8::HandleScope scope(isolate);
@@ -14143,7 +14160,7 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
const int kIterations = 10;
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
- i::AlwaysAllocateScope always_allocate;
+ i::AlwaysAllocateScope always_allocate(i_isolate);
SimulateFullSpace(heap->code_space());
CompileRun(script);
@@ -14248,14 +14265,12 @@ TEST(CatchStackOverflow) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> result = CompileRun(
"function f() {"
" return f();"
"}"
""
- "f();"));
- v8::Handle<v8::Value> result = script->Run();
+ "f();");
CHECK(result.IsEmpty());
}
@@ -14285,8 +14300,7 @@ static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
THREADED_TEST(TryCatchSourceInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Local<v8::String> source = v8_str(
"function Foo() {\n"
" return Bar();\n"
"}\n"
@@ -14304,8 +14318,7 @@ THREADED_TEST(TryCatchSourceInfo) {
const char* resource_name;
v8::Handle<v8::Script> script;
resource_name = "test.js";
- script = v8::Script::Compile(
- source, v8::String::NewFromUtf8(context->GetIsolate(), resource_name));
+ script = CompileWithOrigin(source, resource_name);
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test1.js";
@@ -14330,10 +14343,8 @@ THREADED_TEST(CompilationCache) {
v8::String::NewFromUtf8(context->GetIsolate(), "1234");
v8::Handle<v8::String> source1 =
v8::String::NewFromUtf8(context->GetIsolate(), "1234");
- v8::Handle<v8::Script> script0 = v8::Script::Compile(
- source0, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
- v8::Handle<v8::Script> script1 = v8::Script::Compile(
- source1, v8::String::NewFromUtf8(context->GetIsolate(), "test.js"));
+ v8::Handle<v8::Script> script0 = CompileWithOrigin(source0, "test.js");
+ v8::Handle<v8::Script> script1 = CompileWithOrigin(source1, "test.js");
v8::Handle<v8::Script> script2 =
v8::Script::Compile(source0); // different origin
CHECK_EQ(1234, script0->Run()->Int32Value());
@@ -14406,8 +14417,7 @@ THREADED_TEST(PropertyEnumeration) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -14415,7 +14425,7 @@ THREADED_TEST(PropertyEnumeration) {
"var proto = {x: 1, y: 2, z: 3};"
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
- "result;"))->Run();
+ "result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
@@ -14451,8 +14461,7 @@ THREADED_TEST(PropertyEnumeration2) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::NewFromUtf8(
- context->GetIsolate(),
+ v8::Handle<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -14460,7 +14469,7 @@ THREADED_TEST(PropertyEnumeration2) {
"var proto = {x: 1, y: 2, z: 3};"
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
- "result;"))->Run();
+ "result;");
v8::Handle<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4, elms->Length());
int elmc0 = 0;
@@ -14866,8 +14875,13 @@ TEST(PreCompileInvalidPreparseDataError) {
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
v8::TryCatch try_catch;
- Local<String> source = String::NewFromUtf8(isolate, script);
- Local<Script> compiled_script = Script::New(source, NULL, sd);
+ v8::ScriptCompiler::Source script_source(
+ String::NewFromUtf8(isolate, script),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
+ Local<v8::UnboundScript> compiled_script =
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
+
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Message()->Get());
CHECK_EQ("Uncaught SyntaxError: Invalid preparser data for function bar",
@@ -14884,7 +14898,12 @@ TEST(PreCompileInvalidPreparseDataError) {
sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
- compiled_script = Script::New(source, NULL, sd);
+ v8::ScriptCompiler::Source script_source2(
+ String::NewFromUtf8(isolate, script),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
+ compiled_script =
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source2);
CHECK(!try_catch.HasCaught());
delete sd;
@@ -15200,7 +15219,6 @@ TEST(RegExpInterruption) {
// Test that we cannot set a property on the global object if there
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
- i::FLAG_es5_readonly = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
@@ -15555,7 +15573,6 @@ THREADED_TEST(GetCallingContext) {
// Check that a variable declaration with no explicit initialization
// value does shadow an existing property in the prototype chain.
THREADED_TEST(InitGlobalVarInProtoChain) {
- i::FLAG_es52_globals = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
// Introduce a variable in the prototype chain.
@@ -15592,7 +15609,7 @@ static void CheckElementValue(i::Isolate* isolate,
int expected,
i::Handle<i::Object> obj,
int offset) {
- i::Object* element = obj->GetElement(isolate, offset)->ToObjectChecked();
+ i::Object* element = *i::Object::GetElement(isolate, obj, offset);
CHECK_EQ(expected, i::Smi::cast(element)->value());
}
@@ -15679,20 +15696,17 @@ THREADED_TEST(PixelArray) {
i::Handle<i::Smi> value(i::Smi::FromInt(2),
reinterpret_cast<i::Isolate*>(context->GetIsolate()));
i::Handle<i::Object> no_failure;
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 2, jsobj, 1);
*value.location() = i::Smi::FromInt(256);
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 255, jsobj, 1);
*value.location() = i::Smi::FromInt(-1);
- no_failure =
- i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
+ no_failure = i::JSObject::SetElement(jsobj, 1, value, NONE, i::SLOPPY);
ASSERT(!no_failure.is_null());
i::USE(no_failure);
CheckElementValue(isolate, 0, jsobj, 1);
@@ -16229,7 +16243,7 @@ static void ObjectWithExternalArrayTestHelper(
array_type == v8::kExternalFloat32Array) {
CHECK_EQ(static_cast<int>(i::OS::nan_value()),
static_cast<int>(
- jsobj->GetElement(isolate, 7)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 7)->Number()));
} else {
CheckElementValue(isolate, 0, jsobj, 7);
}
@@ -16241,7 +16255,7 @@ static void ObjectWithExternalArrayTestHelper(
CHECK_EQ(2, result->Int32Value());
CHECK_EQ(2,
static_cast<int>(
- jsobj->GetElement(isolate, 6)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 6)->Number()));
if (array_type != v8::kExternalFloat32Array &&
array_type != v8::kExternalFloat64Array) {
@@ -16410,7 +16424,7 @@ static void FixedTypedArrayTestHelper(
v8::Handle<v8::Object> obj = v8::Object::New(CcTest::isolate());
i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
i::Handle<i::Map> fixed_array_map =
- isolate->factory()->GetElementsTransitionMap(jsobj, elements_kind);
+ i::JSObject::GetElementsTransitionMap(jsobj, elements_kind);
jsobj->set_map(*fixed_array_map);
jsobj->set_elements(*fixed_array);
@@ -16521,7 +16535,7 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
kElementCount);
CHECK_EQ(1,
static_cast<int>(
- jsobj->GetElement(isolate, 1)->ToObjectChecked()->Number()));
+ i::Object::GetElement(isolate, jsobj, 1)->Number()));
ObjectWithExternalArrayTestHelper<ExternalArrayClass, ElementType>(
context.local(), obj, kElementCount, array_type, low, high);
@@ -16998,19 +17012,20 @@ THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
const char *source = "foo";
- v8::Handle<v8::Script> dep =
- v8::Script::Compile(v8::String::NewFromUtf8(c1->GetIsolate(), source));
- v8::Handle<v8::Script> indep =
- v8::Script::New(v8::String::NewFromUtf8(c1->GetIsolate(), source));
+ v8::Handle<v8::Script> dep = v8_compile(source);
+ v8::ScriptCompiler::Source script_source(v8::String::NewFromUtf8(
+ c1->GetIsolate(), source));
+ v8::Handle<v8::UnboundScript> indep =
+ v8::ScriptCompiler::CompileUnbound(c1->GetIsolate(), &script_source);
c1->Global()->Set(v8::String::NewFromUtf8(c1->GetIsolate(), "foo"),
v8::Integer::New(c1->GetIsolate(), 100));
CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->Run()->Int32Value(), 100);
+ CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 100);
LocalContext c2;
c2->Global()->Set(v8::String::NewFromUtf8(c2->GetIsolate(), "foo"),
v8::Integer::New(c2->GetIsolate(), 101));
CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->Run()->Int32Value(), 101);
+ CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 101);
}
@@ -17023,7 +17038,10 @@ THREADED_TEST(StackTrace) {
v8::String::NewFromUtf8(context->GetIsolate(), source);
v8::Handle<v8::String> origin =
v8::String::NewFromUtf8(context->GetIsolate(), "stack-trace-test");
- v8::Script::New(src, origin)->Run();
+ v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
+ v8::ScriptCompiler::CompileUnbound(context->GetIsolate(), &script_source)
+ ->BindToCurrentContext()
+ ->Run();
CHECK(try_catch.HasCaught());
v8::String::Utf8Value stack(try_catch.StackTrace());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
@@ -17129,8 +17147,12 @@ TEST(CaptureStackTrace) {
"var x;eval('new foo();');";
v8::Handle<v8::String> overview_src =
v8::String::NewFromUtf8(isolate, overview_source);
+ v8::ScriptCompiler::Source script_source(overview_src,
+ v8::ScriptOrigin(origin));
v8::Handle<Value> overview_result(
- v8::Script::New(overview_src, origin)->Run());
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source)
+ ->BindToCurrentContext()
+ ->Run());
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
@@ -17149,9 +17171,11 @@ TEST(CaptureStackTrace) {
v8::Handle<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
v8::Handle<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
- v8::Handle<v8::Script> detailed_script(
- v8::Script::New(detailed_src, &detailed_origin));
- v8::Handle<Value> detailed_result(detailed_script->Run());
+ v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
+ v8::Handle<v8::UnboundScript> detailed_script(
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source2));
+ v8::Handle<Value> detailed_result(
+ detailed_script->BindToCurrentContext()->Run());
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
}
@@ -17176,13 +17200,14 @@ TEST(CaptureStackTraceForUncaughtException) {
v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
- Script::Compile(v8_str("function foo() {\n"
- " throw 1;\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};"),
- v8_str("origin"))->Run();
+ CompileRunWithOrigin(
+ "function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};",
+ "origin");
v8::Local<v8::Object> global = env->Global();
Local<Value> trouble = global->Get(v8_str("bar"));
CHECK(trouble->IsFunction());
@@ -17417,9 +17442,7 @@ TEST(ScriptIdInStackTrace) {
" AnalyzeScriptIdInStack();"
"}\n"
"foo();\n");
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"));
- v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
+ v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test");
script->Run();
for (int i = 0; i < 2; i++) {
CHECK(scriptIdInStack[i] != v8::Message::kNoScriptIdInfo);
@@ -17520,10 +17543,33 @@ TEST(DynamicWithSourceURLInStackTrace) {
}
+TEST(DynamicWithSourceURLInStackTraceString) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ const char *source =
+ "function outer() {\n"
+ " function foo() {\n"
+ " FAIL.FAIL;\n"
+ " }\n"
+ " foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::OS::SNPrintF(code, source, "//# sourceURL=source_url");
+ v8::TryCatch try_catch;
+ CompileRunWithOrigin(code.start(), "", 0, 0);
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(try_catch.StackTrace());
+ CHECK(strstr(*stack, "at foo (source_url:3:5)") != NULL);
+}
+
+
static void CreateGarbageInOldSpace() {
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- i::AlwaysAllocateScope always_allocate;
+ i::AlwaysAllocateScope always_allocate(CcTest::i_isolate());
for (int i = 0; i < 1000; i++) {
factory->NewFixedArray(1000, i::TENURED);
}
@@ -17627,7 +17673,7 @@ TEST(Regress2107) {
TEST(Regress2333) {
LocalContext env;
for (int i = 0; i < 3; i++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
}
@@ -18421,14 +18467,14 @@ TEST(SetterOnConstructorPrototype) {
"C2.prototype.__proto__ = P;");
v8::Local<v8::Script> script;
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
}
- script = v8::Script::Compile(v8_str("new C2();"));
+ script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c2->Get(v8_str("x"))->Int32Value());
@@ -18473,14 +18519,14 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
"C2.prototype.__proto__ = P;");
v8::Local<v8::Script> script;
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
}
- script = v8::Script::Compile(v8_str("new C2();"));
+ script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(23, c2->Get(v8_str("x"))->Int32Value());
@@ -18508,7 +18554,7 @@ TEST(Regress618) {
// This compile will add the code to the compilation cache.
CompileRun(source);
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
// Allow enough iterations for the inobject slack tracking logic
// to finalize instance size and install the fast construct stub.
for (int i = 0; i < 256; i++) {
@@ -18527,7 +18573,7 @@ TEST(Regress618) {
// This compile will get the code from the compilation cache.
CompileRun(source);
- script = v8::Script::Compile(v8_str("new C1();"));
+ script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
@@ -18540,6 +18586,8 @@ int prologue_call_count = 0;
int epilogue_call_count = 0;
int prologue_call_count_second = 0;
int epilogue_call_count_second = 0;
+int prologue_call_count_alloc = 0;
+int epilogue_call_count_alloc = 0;
void PrologueCallback(v8::GCType, v8::GCCallbackFlags flags) {
CHECK_EQ(flags, v8::kNoGCCallbackFlags);
@@ -18601,6 +18649,46 @@ void EpilogueCallbackSecond(v8::Isolate* isolate,
}
+void PrologueCallbackAlloc(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ v8::HandleScope scope(isolate);
+
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++prologue_call_count_alloc;
+
+ // Simulate full heap to see if we will reenter this callback
+ SimulateFullSpace(CcTest::heap()->new_space());
+
+ Local<Object> obj = Object::New(isolate);
+ CHECK(!obj.IsEmpty());
+
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
+void EpilogueCallbackAlloc(v8::Isolate* isolate,
+ v8::GCType,
+ v8::GCCallbackFlags flags) {
+ v8::HandleScope scope(isolate);
+
+ CHECK_EQ(flags, v8::kNoGCCallbackFlags);
+ CHECK_EQ(gc_callbacks_isolate, isolate);
+ ++epilogue_call_count_alloc;
+
+ // Simulate full heap to see if we will reenter this callback
+ SimulateFullSpace(CcTest::heap()->new_space());
+
+ Local<Object> obj = Object::New(isolate);
+ CHECK(!obj.IsEmpty());
+
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
TEST(GCCallbacksOld) {
LocalContext context;
@@ -18667,6 +18755,17 @@ TEST(GCCallbacks) {
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
+
+ CHECK_EQ(0, prologue_call_count_alloc);
+ CHECK_EQ(0, epilogue_call_count_alloc);
+ isolate->AddGCPrologueCallback(PrologueCallbackAlloc);
+ isolate->AddGCEpilogueCallback(EpilogueCallbackAlloc);
+ CcTest::heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(1, prologue_call_count_alloc);
+ CHECK_EQ(1, epilogue_call_count_alloc);
+ isolate->RemoveGCPrologueCallback(PrologueCallbackAlloc);
+ isolate->RemoveGCEpilogueCallback(EpilogueCallbackAlloc);
}
@@ -19284,7 +19383,6 @@ TEST(IsolateDifferentContexts) {
class InitDefaultIsolateThread : public v8::internal::Thread {
public:
enum TestCase {
- IgnoreOOM,
SetResourceConstraints,
SetFatalHandler,
SetCounterFunction,
@@ -19301,34 +19399,30 @@ class InitDefaultIsolateThread : public v8::internal::Thread {
v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
switch (testCase_) {
- case IgnoreOOM:
- v8::V8::IgnoreOutOfMemoryException();
- break;
-
- case SetResourceConstraints: {
- static const int K = 1024;
- v8::ResourceConstraints constraints;
- constraints.set_max_young_space_size(256 * K);
- constraints.set_max_old_space_size(4 * K * K);
- v8::SetResourceConstraints(CcTest::isolate(), &constraints);
- break;
- }
+ case SetResourceConstraints: {
+ static const int K = 1024;
+ v8::ResourceConstraints constraints;
+ constraints.set_max_young_space_size(256 * K);
+ constraints.set_max_old_space_size(4 * K * K);
+ v8::SetResourceConstraints(CcTest::isolate(), &constraints);
+ break;
+ }
- case SetFatalHandler:
- v8::V8::SetFatalErrorHandler(NULL);
- break;
+ case SetFatalHandler:
+ v8::V8::SetFatalErrorHandler(NULL);
+ break;
- case SetCounterFunction:
- v8::V8::SetCounterFunction(NULL);
- break;
+ case SetCounterFunction:
+ v8::V8::SetCounterFunction(NULL);
+ break;
- case SetCreateHistogramFunction:
- v8::V8::SetCreateHistogramFunction(NULL);
- break;
+ case SetCreateHistogramFunction:
+ v8::V8::SetCreateHistogramFunction(NULL);
+ break;
- case SetAddHistogramSampleFunction:
- v8::V8::SetAddHistogramSampleFunction(NULL);
- break;
+ case SetAddHistogramSampleFunction:
+ v8::V8::SetAddHistogramSampleFunction(NULL);
+ break;
}
isolate->Exit();
isolate->Dispose();
@@ -19352,31 +19446,26 @@ static void InitializeTestHelper(InitDefaultIsolateThread::TestCase testCase) {
TEST(InitializeDefaultIsolateOnSecondaryThread1) {
- InitializeTestHelper(InitDefaultIsolateThread::IgnoreOOM);
-}
-
-
-TEST(InitializeDefaultIsolateOnSecondaryThread2) {
InitializeTestHelper(InitDefaultIsolateThread::SetResourceConstraints);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread3) {
+TEST(InitializeDefaultIsolateOnSecondaryThread2) {
InitializeTestHelper(InitDefaultIsolateThread::SetFatalHandler);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread4) {
+TEST(InitializeDefaultIsolateOnSecondaryThread3) {
InitializeTestHelper(InitDefaultIsolateThread::SetCounterFunction);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread5) {
+TEST(InitializeDefaultIsolateOnSecondaryThread4) {
InitializeTestHelper(InitDefaultIsolateThread::SetCreateHistogramFunction);
}
-TEST(InitializeDefaultIsolateOnSecondaryThread6) {
+TEST(InitializeDefaultIsolateOnSecondaryThread5) {
InitializeTestHelper(InitDefaultIsolateThread::SetAddHistogramSampleFunction);
}
@@ -20491,6 +20580,102 @@ TEST(CallCompletedCallbackTwoExceptions) {
}
+static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext1Calls++;");
+}
+
+
+static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
+ v8::HandleScope scope(info.GetIsolate());
+ CompileRun("ext2Calls++;");
+}
+
+
+TEST(EnqueueMicrotask) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+}
+
+
+TEST(SetAutorunMicrotasks) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var ext1Calls = 0;"
+ "var ext2Calls = 0;");
+ CompileRun("1+1;");
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), false);
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskOne));
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+
+ V8::RunMicrotasks(env->GetIsolate());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+
+ V8::SetAutorunMicrotasks(env->GetIsolate(), true);
+ v8::V8::EnqueueMicrotask(env->GetIsolate(),
+ Function::New(env->GetIsolate(), MicrotaskTwo));
+ CompileRun("1+1;");
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+}
+
+
static int probes_counter = 0;
static int misses_counter = 0;
static int updates_counter = 0;
@@ -21735,8 +21920,9 @@ THREADED_TEST(FunctionNew) {
i::Smi::cast(v8::Utils::OpenHandle(*func)
->shared()->get_api_func_data()->serial_number())->value();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Object* elm = i_isolate->native_context()->function_cache()
- ->GetElementNoExceptionThrown(i_isolate, serial_number);
+ i::Handle<i::JSObject> cache(i_isolate->native_context()->function_cache());
+ i::Handle<i::Object> elm =
+ i::Object::GetElementNoExceptionThrown(i_isolate, cache, serial_number);
CHECK(elm->IsUndefined());
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
@@ -21814,29 +22000,31 @@ class ApiCallOptimizationChecker {
}
CHECK(holder == info.Holder());
count++;
- }
-
- // TODO(dcarney): move this to v8.h
- static void SetAccessorProperty(Local<Object> object,
- Local<String> name,
- Local<Function> getter,
- Local<Function> setter = Local<Function>()) {
- i::Isolate* isolate = CcTest::i_isolate();
- v8::AccessControl settings = v8::DEFAULT;
- v8::PropertyAttribute attribute = v8::None;
- i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
- i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
- if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
- i::JSObject::DefineAccessor(v8::Utils::OpenHandle(*object),
- v8::Utils::OpenHandle(*name),
- getter_i,
- setter_i,
- static_cast<PropertyAttributes>(attribute),
- settings);
+ info.GetReturnValue().Set(v8_str("returned"));
}
public:
- void Run(bool use_signature, bool global) {
+ enum SignatureType {
+ kNoSignature,
+ kSignatureOnReceiver,
+ kSignatureOnPrototype
+ };
+
+ void RunAll() {
+ SignatureType signature_types[] =
+ {kNoSignature, kSignatureOnReceiver, kSignatureOnPrototype};
+ for (unsigned i = 0; i < ARRAY_SIZE(signature_types); i++) {
+ SignatureType signature_type = signature_types[i];
+ for (int j = 0; j < 2; j++) {
+ bool global = j == 0;
+ int key = signature_type +
+ ARRAY_SIZE(signature_types) * (global ? 1 : 0);
+ Run(signature_type, global, key);
+ }
+ }
+ }
+
+ void Run(SignatureType signature_type, bool global, int key) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
// Build a template for signature checks.
@@ -21849,8 +22037,15 @@ class ApiCallOptimizationChecker {
Local<v8::FunctionTemplate> function_template
= FunctionTemplate::New(isolate);
function_template->Inherit(parent_template);
- if (use_signature) {
- signature = v8::Signature::New(isolate, parent_template);
+ switch (signature_type) {
+ case kNoSignature:
+ break;
+ case kSignatureOnReceiver:
+ signature = v8::Signature::New(isolate, function_template);
+ break;
+ case kSignatureOnPrototype:
+ signature = v8::Signature::New(isolate, parent_template);
+ break;
}
signature_template = function_template->InstanceTemplate();
}
@@ -21864,19 +22059,21 @@ class ApiCallOptimizationChecker {
// Get the holder objects.
Local<Object> inner_global =
Local<Object>::Cast(context->Global()->GetPrototype());
- Local<Object> function_holder =
- Local<Object>::Cast(function_receiver->GetPrototype());
- // Install function on hidden prototype object.
+ // Install functions on hidden prototype object if there is one.
data = Object::New(isolate);
Local<FunctionTemplate> function_template = FunctionTemplate::New(
isolate, OptimizationCallback, data, signature);
Local<Function> function = function_template->GetFunction();
- Local<Object> global_holder = Local<Object>::Cast(
- inner_global->GetPrototype());
+ Local<Object> global_holder = inner_global;
+ Local<Object> function_holder = function_receiver;
+ if (signature_type == kSignatureOnPrototype) {
+ function_holder = Local<Object>::Cast(function_holder->GetPrototype());
+ global_holder = Local<Object>::Cast(global_holder->GetPrototype());
+ }
global_holder->Set(v8_str("g_f"), function);
- SetAccessorProperty(global_holder, v8_str("g_acc"), function, function);
+ global_holder->SetAccessorProperty(v8_str("g_acc"), function, function);
function_holder->Set(v8_str("f"), function);
- SetAccessorProperty(function_holder, v8_str("acc"), function, function);
+ function_holder->SetAccessorProperty(v8_str("acc"), function, function);
// Initialize expected values.
callee = function;
count = 0;
@@ -21887,7 +22084,7 @@ class ApiCallOptimizationChecker {
holder = function_receiver;
// If not using a signature, add something else to the prototype chain
// to test the case that holder != receiver
- if (!use_signature) {
+ if (signature_type == kNoSignature) {
receiver = Local<Object>::Cast(CompileRun(
"var receiver_subclass = {};\n"
"receiver_subclass.__proto__ = function_receiver;\n"
@@ -21899,48 +22096,53 @@ class ApiCallOptimizationChecker {
}
}
// With no signature, the holder is not set.
- if (!use_signature) holder = receiver;
+ if (signature_type == kNoSignature) holder = receiver;
// build wrap_function
- int key = (use_signature ? 1 : 0) + 2 * (global ? 1 : 0);
i::ScopedVector<char> wrap_function(200);
if (global) {
i::OS::SNPrintF(
wrap_function,
"function wrap_f_%d() { var f = g_f; return f(); }\n"
"function wrap_get_%d() { return this.g_acc; }\n"
- "function wrap_set_%d() { this.g_acc = 1; }\n",
+ "function wrap_set_%d() { return this.g_acc = 1; }\n",
key, key, key);
} else {
i::OS::SNPrintF(
wrap_function,
"function wrap_f_%d() { return receiver_subclass.f(); }\n"
"function wrap_get_%d() { return receiver_subclass.acc; }\n"
- "function wrap_set_%d() { receiver_subclass.acc = 1; }\n",
+ "function wrap_set_%d() { return receiver_subclass.acc = 1; }\n",
key, key, key);
}
// build source string
- i::ScopedVector<char> source(500);
+ i::ScopedVector<char> source(1000);
i::OS::SNPrintF(
source,
"%s\n" // wrap functions
- "function wrap_f() { wrap_f_%d(); }\n"
- "function wrap_get() { wrap_get_%d(); }\n"
- "function wrap_set() { wrap_set_%d(); }\n"
+ "function wrap_f() { return wrap_f_%d(); }\n"
+ "function wrap_get() { return wrap_get_%d(); }\n"
+ "function wrap_set() { return wrap_set_%d(); }\n"
+ "check = function(returned) {\n"
+ " if (returned !== 'returned') { throw returned; }\n"
+ "}\n"
"\n"
- "wrap_f();\n"
- "wrap_f();\n"
+ "check(wrap_f());\n"
+ "check(wrap_f());\n"
"%%OptimizeFunctionOnNextCall(wrap_f_%d);\n"
- "wrap_f();\n"
+ "check(wrap_f());\n"
"\n"
- "wrap_get();\n"
- "wrap_get();\n"
+ "check(wrap_get());\n"
+ "check(wrap_get());\n"
"%%OptimizeFunctionOnNextCall(wrap_get_%d);\n"
- "wrap_get();\n"
+ "check(wrap_get());\n"
"\n"
- "wrap_set();\n"
- "wrap_set();\n"
+ "check = function(returned) {\n"
+ " if (returned !== 1) { throw returned; }\n"
+ "}\n"
+ "check(wrap_set());\n"
+ "check(wrap_set());\n"
"%%OptimizeFunctionOnNextCall(wrap_set_%d);\n"
- "wrap_set();\n",
+ "check(wrap_set());\n",
wrap_function.start(), key, key, key, key, key, key);
v8::TryCatch try_catch;
CompileRun(source.start());
@@ -21960,10 +22162,161 @@ int ApiCallOptimizationChecker::count = 0;
TEST(TestFunctionCallOptimization) {
i::FLAG_allow_natives_syntax = true;
ApiCallOptimizationChecker checker;
- checker.Run(true, true);
- checker.Run(false, true);
- checker.Run(true, false);
- checker.Run(false, false);
+ checker.RunAll();
+}
+
+
+static const char* last_event_message;
+static int last_event_status;
+void StoringEventLoggerCallback(const char* message, int status) {
+ last_event_message = message;
+ last_event_status = status;
+}
+
+
+TEST(EventLogging) {
+ v8::Isolate* isolate = CcTest::isolate();
+ isolate->SetEventLogger(StoringEventLoggerCallback);
+ v8::internal::HistogramTimer* histogramTimer =
+ new v8::internal::HistogramTimer(
+ "V8.Test", 0, 10000, 50,
+ reinterpret_cast<v8::internal::Isolate*>(isolate));
+ histogramTimer->Start();
+ CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(0, last_event_status);
+ histogramTimer->Stop();
+ CHECK_EQ("V8.Test", last_event_message);
+ CHECK_EQ(1, last_event_status);
+}
+
+
+TEST(Promises) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Handle<Object> global = context->Global();
+
+ // Creation.
+ Handle<v8::Promise::Resolver> pr = v8::Promise::Resolver::New(isolate);
+ Handle<v8::Promise::Resolver> rr = v8::Promise::Resolver::New(isolate);
+ Handle<v8::Promise> p = pr->GetPromise();
+ Handle<v8::Promise> r = rr->GetPromise();
+
+ // IsPromise predicate.
+ CHECK(p->IsPromise());
+ CHECK(r->IsPromise());
+ Handle<Value> o = v8::Object::New(isolate);
+ CHECK(!o->IsPromise());
+
+ // Resolution and rejection.
+ pr->Resolve(v8::Integer::New(isolate, 1));
+ CHECK(p->IsPromise());
+ rr->Reject(v8::Integer::New(isolate, 2));
+ CHECK(r->IsPromise());
+
+ // Chaining non-pending promises.
+ CompileRun(
+ "var x1 = 0;\n"
+ "var x2 = 0;\n"
+ "function f1(x) { x1 = x; return x+1 };\n"
+ "function f2(x) { x2 = x; return x+1 };\n");
+ Handle<Function> f1 = Handle<Function>::Cast(global->Get(v8_str("f1")));
+ Handle<Function> f2 = Handle<Function>::Cast(global->Get(v8_str("f2")));
+
+ p->Chain(f1);
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+
+ p->Catch(f2);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ r->Catch(f2);
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
+
+ r->Chain(f1);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+
+ // Chaining pending promises.
+ CompileRun("x1 = x2 = 0;");
+ pr = v8::Promise::Resolver::New(isolate);
+ rr = v8::Promise::Resolver::New(isolate);
+
+ pr->GetPromise()->Chain(f1);
+ rr->GetPromise()->Catch(f2);
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ pr->Resolve(v8::Integer::New(isolate, 1));
+ rr->Reject(v8::Integer::New(isolate, 2));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
+
+ // Multi-chaining.
+ CompileRun("x1 = x2 = 0;");
+ pr = v8::Promise::Resolver::New(isolate);
+ pr->GetPromise()->Chain(f1)->Chain(f2);
+ pr->Resolve(v8::Integer::New(isolate, 3));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
+
+ CompileRun("x1 = x2 = 0;");
+ rr = v8::Promise::Resolver::New(isolate);
+ rr->GetPromise()->Catch(f1)->Chain(f2);
+ rr->Reject(v8::Integer::New(isolate, 3));
+ CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ V8::RunMicrotasks(isolate);
+ CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
+}
+
+
+TEST(DisallowJavascriptExecutionScope) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope no_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
+ CompileRun("2+2");
+}
+
+
+TEST(AllowJavascriptExecutionScope) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope no_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE);
+ v8::Isolate::DisallowJavascriptExecutionScope throw_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ { v8::Isolate::AllowJavascriptExecutionScope yes_js(isolate);
+ CompileRun("1+1");
+ }
+}
+
+
+TEST(ThrowOnJavascriptExecution) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch;
+ v8::Isolate::DisallowJavascriptExecutionScope throw_js(
+ isolate, v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+ CompileRun("1+1");
+ CHECK(try_catch.HasCaught());
}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index b21dc34dc..9c1c04fe3 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1266,6 +1266,10 @@ TEST(15) {
uint32_t dstA1;
uint32_t dstA2;
uint32_t dstA3;
+ uint32_t dstA4;
+ uint32_t dstA5;
+ uint32_t dstA6;
+ uint32_t dstA7;
} T;
T t;
@@ -1291,7 +1295,14 @@ TEST(15) {
__ add(r4, r0, Operand(OFFSET_OF(T, dstA0)));
__ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(r4));
- __ ldm(ia_w, sp, r4.bit() | pc.bit());
+ // The same expansion, but with different source and destination registers.
+ __ add(r4, r0, Operand(OFFSET_OF(T, srcA0)));
+ __ vld1(Neon8, NeonListOperand(d1), NeonMemOperand(r4));
+ __ vmovl(NeonU8, q1, d1);
+ __ add(r4, r0, Operand(OFFSET_OF(T, dstA4)));
+ __ vst1(Neon8, NeonListOperand(d2, 2), NeonMemOperand(r4));
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
@@ -1326,6 +1337,10 @@ TEST(15) {
t.dstA1 = 0;
t.dstA2 = 0;
t.dstA3 = 0;
+ t.dstA4 = 0;
+ t.dstA5 = 0;
+ t.dstA6 = 0;
+ t.dstA7 = 0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x01020304, t.dst0);
@@ -1340,6 +1355,10 @@ TEST(15) {
CHECK_EQ(0x00410042, t.dstA1);
CHECK_EQ(0x00830084, t.dstA2);
CHECK_EQ(0x00810082, t.dstA3);
+ CHECK_EQ(0x00430044, t.dstA4);
+ CHECK_EQ(0x00410042, t.dstA5);
+ CHECK_EQ(0x00830084, t.dstA6);
+ CHECK_EQ(0x00810082, t.dstA7);
}
}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
new file mode 100644
index 000000000..51c202fc0
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -0,0 +1,10801 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cmath>
+#include <limits>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/simulator-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+#include "test-utils-arm64.h"
+
+using namespace v8::internal;
+
+// Test infrastructure.
+//
+// Tests are functions which accept no parameters and have no return values.
+// The testing code should not perform an explicit return once completed. For
+// example to test the mov immediate instruction a very simple test would be:
+//
+// TEST(mov_x0_one) {
+// SETUP();
+//
+// START();
+// __ mov(x0, Operand(1));
+// END();
+//
+// RUN();
+//
+// ASSERT_EQUAL_64(1, x0);
+//
+// TEARDOWN();
+// }
+//
+// Within a START ... END block all registers but sp can be modified. sp has to
+// be explicitly saved/restored. The END() macro replaces the function return
+// so it may appear multiple times in a test if the test has multiple exit
+// points.
+//
+// Once the test has been run all integer and floating point registers as well
+// as flags are accessible through a RegisterDump instance, see
+// utils-arm64.cc for more info on RegisterDump.
+//
+// We provide some helper asserts to handle common cases:
+//
+// ASSERT_EQUAL_32(int32_t, int_32t)
+// ASSERT_EQUAL_FP32(float, float)
+// ASSERT_EQUAL_32(int32_t, W register)
+// ASSERT_EQUAL_FP32(float, S register)
+// ASSERT_EQUAL_64(int64_t, int_64t)
+// ASSERT_EQUAL_FP64(double, double)
+// ASSERT_EQUAL_64(int64_t, X register)
+// ASSERT_EQUAL_64(X register, X register)
+// ASSERT_EQUAL_FP64(double, D register)
+//
+// e.g. ASSERT_EQUAL_64(0.5, d30);
+//
+// If more advanced computation is required before the assert, then access the
+// RegisterDump named core directly:
+//
+// ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
+
+
+#if 0 // TODO(all): enable.
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+#endif
+
+#define __ masm.
+
+#define BUF_SIZE 8192
+#define SETUP() SETUP_SIZE(BUF_SIZE)
+
+#define INIT_V8() \
+ CcTest::InitializeVM(); \
+
+#ifdef USE_SIMULATOR
+
+// Run tests with the simulator.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ PrintDisassembler* pdis = NULL; \
+ RegisterDump core;
+
+/* if (Cctest::trace_sim()) { \
+ pdis = new PrintDisassembler(stdout); \
+ decoder.PrependVisitor(pdis); \
+ } \
+ */
+
+// Reset the assembler and simulator, so that instructions can be generated,
+// but don't actually emit any code. This can be used by tests that need to
+// emit instructions at the start of the buffer. Note that START_AFTER_RESET
+// must be called before any callee-saved register is modified, and before an
+// END is encountered.
+//
+// Most tests should call START, rather than call RESET directly.
+#define RESET() \
+ __ Reset(); \
+ simulator.ResetState();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters(); \
+ __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete pdis; \
+ delete[] buf;
+
+#else // ifdef USE_SIMULATOR.
+// Run the test on real hardware or models.
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ ASSERT(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size); \
+ RegisterDump core; \
+ CPU::SetUp();
+
+#define RESET() \
+ __ Reset();
+
+#define START_AFTER_RESET() \
+ __ SetStackPointer(csp); \
+ __ PushCalleeSavedRegisters();
+
+#define START() \
+ RESET(); \
+ START_AFTER_RESET();
+
+#define RUN() \
+ CPU::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
+ }
+
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ __ GetCode(NULL);
+
+#define TEARDOWN() \
+ delete[] buf;
+
+#endif // ifdef USE_SIMULATOR.
+
+#define ASSERT_EQUAL_NZCV(expected) \
+ CHECK(EqualNzcv(expected, core.flags_nzcv()))
+
+#define ASSERT_EQUAL_REGISTERS(expected) \
+ CHECK(EqualRegisters(&expected, &core))
+
+#define ASSERT_EQUAL_32(expected, result) \
+ CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
+
+#define ASSERT_EQUAL_FP32(expected, result) \
+ CHECK(EqualFP32(expected, &core, result))
+
+#define ASSERT_EQUAL_64(expected, result) \
+ CHECK(Equal64(expected, &core, result))
+
+#define ASSERT_EQUAL_FP64(expected, result) \
+ CHECK(EqualFP64(expected, &core, result))
+
+#ifdef DEBUG
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ CHECK((expected) == (__ LiteralPoolSize()))
+#else
+#define ASSERT_LITERAL_POOL_SIZE(expected) \
+ ((void) 0)
+#endif
+
+
+TEST(stack_ops) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // save csp.
+ __ Mov(x29, csp);
+
+ // Set the csp to a known value.
+ __ Mov(x16, 0x1000);
+ __ Mov(csp, x16);
+ __ Mov(x0, csp);
+
+ // Add immediate to the csp, and move the result to a normal register.
+ __ Add(csp, csp, Operand(0x50));
+ __ Mov(x1, csp);
+
+ // Add extended to the csp, and move the result to a normal register.
+ __ Mov(x17, 0xfff);
+ __ Add(csp, csp, Operand(x17, SXTB));
+ __ Mov(x2, csp);
+
+ // Create an csp using a logical instruction, and move to normal register.
+ __ Orr(csp, xzr, Operand(0x1fff));
+ __ Mov(x3, csp);
+
+ // Write wcsp using a logical instruction.
+ __ Orr(wcsp, wzr, Operand(0xfffffff8L));
+ __ Mov(x4, csp);
+
+ // Write csp, and read back wcsp.
+ __ Orr(csp, xzr, Operand(0xfffffff8L));
+ __ Mov(w5, wcsp);
+
+ // restore csp.
+ __ Mov(csp, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1000, x0);
+ ASSERT_EQUAL_64(0x1050, x1);
+ ASSERT_EQUAL_64(0x104f, x2);
+ ASSERT_EQUAL_64(0x1fff, x3);
+ ASSERT_EQUAL_64(0xfffffff8, x4);
+ ASSERT_EQUAL_64(0xfffffff8, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(mvn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mvn(w0, 0xfff);
+ __ Mvn(x1, 0xfff);
+ __ Mvn(w2, Operand(w0, LSL, 1));
+ __ Mvn(x3, Operand(x1, LSL, 2));
+ __ Mvn(w4, Operand(w0, LSR, 3));
+ __ Mvn(x5, Operand(x1, LSR, 4));
+ __ Mvn(w6, Operand(w0, ASR, 11));
+ __ Mvn(x7, Operand(x1, ASR, 12));
+ __ Mvn(w8, Operand(w0, ROR, 13));
+ __ Mvn(x9, Operand(x1, ROR, 14));
+ __ Mvn(w10, Operand(w2, UXTB));
+ __ Mvn(x11, Operand(x2, SXTB, 1));
+ __ Mvn(w12, Operand(w2, UXTH, 2));
+ __ Mvn(x13, Operand(x2, SXTH, 3));
+ __ Mvn(x14, Operand(w2, UXTW, 4));
+ __ Mvn(x15, Operand(w2, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffff000, x0);
+ ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
+ ASSERT_EQUAL_64(0x00001fff, x2);
+ ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
+ ASSERT_EQUAL_64(0xe00001ff, x4);
+ ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
+ ASSERT_EQUAL_64(0x00000001, x6);
+ ASSERT_EQUAL_64(0x0, x7);
+ ASSERT_EQUAL_64(0x7ff80000, x8);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
+ ASSERT_EQUAL_64(0xffffff00, x10);
+ ASSERT_EQUAL_64(0x0000000000000001UL, x11);
+ ASSERT_EQUAL_64(0xffff8003, x12);
+ ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
+ ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(mov) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Mov(x0, 0x0123456789abcdefL);
+
+ __ movz(x1, 0xabcdL << 16);
+ __ movk(x2, 0xabcdL << 32);
+ __ movn(x3, 0xabcdL << 48);
+
+ __ Mov(x4, 0x0123456789abcdefL);
+ __ Mov(x5, x4);
+
+ __ Mov(w6, -1);
+
+ // Test that moves back to the same register have the desired effect. This
+ // is a no-op for X registers, and a truncation for W registers.
+ __ Mov(x7, 0x0123456789abcdefL);
+ __ Mov(x7, x7);
+ __ Mov(x8, 0x0123456789abcdefL);
+ __ Mov(w8, w8);
+ __ Mov(x9, 0x0123456789abcdefL);
+ __ Mov(x9, Operand(x9));
+ __ Mov(x10, 0x0123456789abcdefL);
+ __ Mov(w10, Operand(w10));
+
+ __ Mov(w11, 0xfff);
+ __ Mov(x12, 0xfff);
+ __ Mov(w13, Operand(w11, LSL, 1));
+ __ Mov(x14, Operand(x12, LSL, 2));
+ __ Mov(w15, Operand(w11, LSR, 3));
+ __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(w19, Operand(w11, ASR, 11));
+ __ Mov(x20, Operand(x12, ASR, 12));
+ __ Mov(w21, Operand(w11, ROR, 13));
+ __ Mov(x22, Operand(x12, ROR, 14));
+ __ Mov(w23, Operand(w13, UXTB));
+ __ Mov(x24, Operand(x13, SXTB, 1));
+ __ Mov(w25, Operand(w13, UXTH, 2));
+ __ Mov(x26, Operand(x13, SXTH, 3));
+ __ Mov(x27, Operand(w13, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
+ ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
+ ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
+ ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
+ ASSERT_EQUAL_64(x4, x5);
+ ASSERT_EQUAL_32(-1, w6);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
+ ASSERT_EQUAL_32(0x89abcdefL, w8);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
+ ASSERT_EQUAL_32(0x89abcdefL, w10);
+ ASSERT_EQUAL_64(0x00000fff, x11);
+ ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
+ ASSERT_EQUAL_64(0x00001ffe, x13);
+ ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
+ ASSERT_EQUAL_64(0x000001ff, x15);
+ ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
+ ASSERT_EQUAL_64(0x00000001, x19);
+ ASSERT_EQUAL_64(0x0, x20);
+ ASSERT_EQUAL_64(0x7ff80000, x21);
+ ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
+ ASSERT_EQUAL_64(0x000000fe, x23);
+ ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
+ ASSERT_EQUAL_64(0x00007ff8, x25);
+ ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
+ ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_w) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0xffffffffL);
+ __ Mov(w1, 0xffff1234L);
+ __ Mov(w2, 0x1234ffffL);
+ __ Mov(w3, 0x00000000L);
+ __ Mov(w4, 0x00001234L);
+ __ Mov(w5, 0x12340000L);
+ __ Mov(w6, 0x12345678L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffL, x0);
+ ASSERT_EQUAL_64(0xffff1234L, x1);
+ ASSERT_EQUAL_64(0x1234ffffL, x2);
+ ASSERT_EQUAL_64(0x00000000L, x3);
+ ASSERT_EQUAL_64(0x00001234L, x4);
+ ASSERT_EQUAL_64(0x12340000L, x5);
+ ASSERT_EQUAL_64(0x12345678L, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(mov_imm_x) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Mov(x1, 0xffffffffffff1234L);
+ __ Mov(x2, 0xffffffff12345678L);
+ __ Mov(x3, 0xffff1234ffff5678L);
+ __ Mov(x4, 0x1234ffffffff5678L);
+ __ Mov(x5, 0x1234ffff5678ffffL);
+ __ Mov(x6, 0x12345678ffffffffL);
+ __ Mov(x7, 0x1234ffffffffffffL);
+ __ Mov(x8, 0x123456789abcffffL);
+ __ Mov(x9, 0x12345678ffff9abcL);
+ __ Mov(x10, 0x1234ffff56789abcL);
+ __ Mov(x11, 0xffff123456789abcL);
+ __ Mov(x12, 0x0000000000000000L);
+ __ Mov(x13, 0x0000000000001234L);
+ __ Mov(x14, 0x0000000012345678L);
+ __ Mov(x15, 0x0000123400005678L);
+ __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x19, 0x1234000056780000L);
+ __ Mov(x20, 0x1234567800000000L);
+ __ Mov(x21, 0x1234000000000000L);
+ __ Mov(x22, 0x123456789abc0000L);
+ __ Mov(x23, 0x1234567800009abcL);
+ __ Mov(x24, 0x1234000056789abcL);
+ __ Mov(x25, 0x0000123456789abcL);
+ __ Mov(x26, 0x123456789abcdef0L);
+ __ Mov(x27, 0xffff000000000001L);
+ __ Mov(x28, 0x8000ffff00000000L);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
+ ASSERT_EQUAL_64(0xffffffff12345678L, x2);
+ ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
+ ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
+ ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
+ ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
+ ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
+ ASSERT_EQUAL_64(0x123456789abcffffL, x8);
+ ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
+ ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
+ ASSERT_EQUAL_64(0xffff123456789abcL, x11);
+ ASSERT_EQUAL_64(0x0000000000000000L, x12);
+ ASSERT_EQUAL_64(0x0000000000001234L, x13);
+ ASSERT_EQUAL_64(0x0000000012345678L, x14);
+ ASSERT_EQUAL_64(0x0000123400005678L, x15);
+ ASSERT_EQUAL_64(0x1234000000005678L, x18);
+ ASSERT_EQUAL_64(0x1234000056780000L, x19);
+ ASSERT_EQUAL_64(0x1234567800000000L, x20);
+ ASSERT_EQUAL_64(0x1234000000000000L, x21);
+ ASSERT_EQUAL_64(0x123456789abc0000L, x22);
+ ASSERT_EQUAL_64(0x1234567800009abcL, x23);
+ ASSERT_EQUAL_64(0x1234000056789abcL, x24);
+ ASSERT_EQUAL_64(0x0000123456789abcL, x25);
+ ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
+ ASSERT_EQUAL_64(0xffff000000000001L, x27);
+ ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
+
+ TEARDOWN();
+}
+
+
+TEST(orr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orr(x2, x0, Operand(x1));
+ __ Orr(w3, w0, Operand(w1, LSL, 28));
+ __ Orr(x4, x0, Operand(x1, LSL, 32));
+ __ Orr(x5, x0, Operand(x1, LSR, 4));
+ __ Orr(w6, w0, Operand(w1, ASR, 4));
+ __ Orr(x7, x0, Operand(x1, ASR, 4));
+ __ Orr(w8, w0, Operand(w1, ROR, 12));
+ __ Orr(x9, x0, Operand(x1, ROR, 12));
+ __ Orr(w10, w0, Operand(0xf));
+ __ Orr(x11, x0, Operand(0xf0000000f0000000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000f0ff, x2);
+ ASSERT_EQUAL_64(0xf000f0f0, x3);
+ ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
+ ASSERT_EQUAL_64(0x0f00f0ff, x5);
+ ASSERT_EQUAL_64(0xff00f0ff, x6);
+ ASSERT_EQUAL_64(0x0f00f0ff, x7);
+ ASSERT_EQUAL_64(0x0ffff0f0, x8);
+ ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
+ ASSERT_EQUAL_64(0xf0ff, x10);
+ ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orr_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008080UL);
+ __ Orr(w6, w0, Operand(w1, UXTB));
+ __ Orr(x7, x0, Operand(x1, UXTH, 1));
+ __ Orr(w8, w0, Operand(w1, UXTW, 2));
+ __ Orr(x9, x0, Operand(x1, UXTX, 3));
+ __ Orr(w10, w0, Operand(w1, SXTB));
+ __ Orr(x11, x0, Operand(x1, SXTH, 1));
+ __ Orr(x12, x0, Operand(x1, SXTW, 2));
+ __ Orr(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010101, x7);
+ ASSERT_EQUAL_64(0x00020201, x8);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x9);
+ ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040401UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bitwise_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
+
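+  // Neither immediate below is encodable as a logical (bitmask) immediate, so
+  // the macro assembler presumably materializes it in a scratch register
+  // before performing the Orr.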
+ __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Orr(w11, w1, Operand(0x90abcdef));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf0f0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Orn(x2, x0, Operand(x1));
+ __ Orn(w3, w0, Operand(w1, LSL, 4));
+ __ Orn(x4, x0, Operand(x1, LSL, 4));
+ __ Orn(x5, x0, Operand(x1, LSR, 1));
+ __ Orn(w6, w0, Operand(w1, ASR, 1));
+ __ Orn(x7, x0, Operand(x1, ASR, 1));
+ __ Orn(w8, w0, Operand(w1, ROR, 16));
+ __ Orn(x9, x0, Operand(x1, ROR, 16));
+ __ Orn(w10, w0, Operand(0xffff));
+ __ Orn(x11, x0, Operand(0xffff0000ffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
+ ASSERT_EQUAL_64(0xfffff0ff, x3);
+ ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
+ ASSERT_EQUAL_64(0x07fffff0, x6);
+ ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
+ ASSERT_EQUAL_64(0xff00ffff, x8);
+ ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
+ ASSERT_EQUAL_64(0xfffff0f0, x10);
+ ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(orn_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Orn(w6, w0, Operand(w1, UXTB));
+ __ Orn(x7, x0, Operand(x1, UXTH, 1));
+ __ Orn(w8, w0, Operand(w1, UXTW, 2));
+ __ Orn(x9, x0, Operand(x1, UXTX, 3));
+ __ Orn(w10, w0, Operand(w1, SXTB));
+ __ Orn(x11, x0, Operand(x1, SXTH, 1));
+ __ Orn(x12, x0, Operand(x1, SXTW, 2));
+ __ Orn(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7f, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007f, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(and_) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ And(x2, x0, Operand(x1));
+ __ And(w3, w0, Operand(w1, LSL, 4));
+ __ And(x4, x0, Operand(x1, LSL, 4));
+ __ And(x5, x0, Operand(x1, LSR, 1));
+ __ And(w6, w0, Operand(w1, ASR, 20));
+ __ And(x7, x0, Operand(x1, ASR, 20));
+ __ And(w8, w0, Operand(w1, ROR, 28));
+ __ And(x9, x0, Operand(x1, ROR, 28));
+ __ And(w10, w0, Operand(0xff00));
+ __ And(x11, x0, Operand(0xff));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x000000f0, x2);
+ ASSERT_EQUAL_64(0x00000ff0, x3);
+ ASSERT_EQUAL_64(0x00000ff0, x4);
+ ASSERT_EQUAL_64(0x00000070, x5);
+ ASSERT_EQUAL_64(0x0000ff00, x6);
+ ASSERT_EQUAL_64(0x00000f00, x7);
+ ASSERT_EQUAL_64(0x00000ff0, x8);
+ ASSERT_EQUAL_64(0x00000000, x9);
+ ASSERT_EQUAL_64(0x0000ff00, x10);
+ ASSERT_EQUAL_64(0x000000f0, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(and_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ And(w6, w0, Operand(w1, UXTB));
+ __ And(x7, x0, Operand(x1, UXTH, 1));
+ __ And(w8, w0, Operand(w1, UXTW, 2));
+ __ And(x9, x0, Operand(x1, UXTX, 3));
+ __ And(w10, w0, Operand(w1, SXTB));
+ __ And(x11, x0, Operand(x1, SXTH, 1));
+ __ And(x12, x0, Operand(x1, SXTW, 2));
+ __ And(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000081, x6);
+ ASSERT_EQUAL_64(0x00010102, x7);
+ ASSERT_EQUAL_64(0x00020204, x8);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x9);
+ ASSERT_EQUAL_64(0xffffff81, x10);
+ ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
+ ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
+ ASSERT_EQUAL_64(0x0000000400040408UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(ands) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xf00000ff, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+ __ Ands(w0, w0, Operand(w1, LSR, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Ands(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Ands(w0, w0, Operand(0xf));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xff000000);
+ __ Ands(w0, w0, Operand(0x80000000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(bic) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Bic(x2, x0, Operand(x1));
+ __ Bic(w3, w0, Operand(w1, LSL, 4));
+ __ Bic(x4, x0, Operand(x1, LSL, 4));
+ __ Bic(x5, x0, Operand(x1, LSR, 1));
+ __ Bic(w6, w0, Operand(w1, ASR, 20));
+ __ Bic(x7, x0, Operand(x1, ASR, 20));
+ __ Bic(w8, w0, Operand(w1, ROR, 28));
+ __ Bic(x9, x0, Operand(x1, ROR, 24));
+ __ Bic(x10, x0, Operand(0x1f));
+ __ Bic(x11, x0, Operand(0x100));
+
+ // Test bic into csp when the constant cannot be encoded in the immediate
+ // field.
+  // Use x20 to preserve csp, and check the result via x21, because the test
+  // infrastructure requires that csp be restored to its original value.
+ __ Mov(x20, csp);
+ __ Mov(x0, 0xffffff);
+ __ Bic(csp, x0, Operand(0xabcdef));
+ __ Mov(x21, csp);
+ __ Mov(csp, x20);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000ff00, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000f000, x4);
+ ASSERT_EQUAL_64(0x0000ff80, x5);
+ ASSERT_EQUAL_64(0x000000f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f000, x8);
+ ASSERT_EQUAL_64(0x0000ff00, x9);
+ ASSERT_EQUAL_64(0x0000ffe0, x10);
+ ASSERT_EQUAL_64(0x0000fef0, x11);
+
+ ASSERT_EQUAL_64(0x543210, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(bic_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffUL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Bic(w6, w0, Operand(w1, UXTB));
+ __ Bic(x7, x0, Operand(x1, UXTH, 1));
+ __ Bic(w8, w0, Operand(w1, UXTW, 2));
+ __ Bic(x9, x0, Operand(x1, UXTX, 3));
+ __ Bic(w10, w0, Operand(w1, SXTB));
+ __ Bic(x11, x0, Operand(x1, SXTH, 1));
+ __ Bic(x12, x0, Operand(x1, SXTW, 2));
+ __ Bic(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff7e, x6);
+ ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
+ ASSERT_EQUAL_64(0xfffdfdfb, x8);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+ ASSERT_EQUAL_64(0x0000007e, x10);
+ ASSERT_EQUAL_64(0x0000fefd, x11);
+ ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
+ ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(bics) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0xffff);
+ __ Bics(w0, w1, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffff);
+ __ Bics(w0, w0, Operand(w0, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x0);
+
+ START();
+ __ Mov(x0, 0x8000000000000000L);
+ __ Mov(x1, 0x00000001);
+ __ Bics(x0, x0, Operand(x1, ROR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ START();
+ __ Mov(x0, 0xffffffffffffffffL);
+ __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x0);
+
+ START();
+ __ Mov(w0, 0xffff0000);
+ __ Bics(w0, w0, Operand(0xfffffff0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0x00000000, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(eor) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eor(x2, x0, Operand(x1));
+ __ Eor(w3, w0, Operand(w1, LSL, 4));
+ __ Eor(x4, x0, Operand(x1, LSL, 4));
+ __ Eor(x5, x0, Operand(x1, LSR, 1));
+ __ Eor(w6, w0, Operand(w1, ASR, 20));
+ __ Eor(x7, x0, Operand(x1, ASR, 20));
+ __ Eor(w8, w0, Operand(w1, ROR, 28));
+ __ Eor(x9, x0, Operand(x1, ROR, 28));
+ __ Eor(w10, w0, Operand(0xff00ff00));
+ __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xf000ff0f, x2);
+ ASSERT_EQUAL_64(0x0000f000, x3);
+ ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
+ ASSERT_EQUAL_64(0x7800ff8f, x5);
+ ASSERT_EQUAL_64(0xffff00f0, x6);
+ ASSERT_EQUAL_64(0x0000f0f0, x7);
+ ASSERT_EQUAL_64(0x0000f00f, x8);
+ ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
+ ASSERT_EQUAL_64(0xff0000f0, x10);
+ ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eor_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eor(w6, w0, Operand(w1, UXTB));
+ __ Eor(x7, x0, Operand(x1, UXTH, 1));
+ __ Eor(w8, w0, Operand(w1, UXTW, 2));
+ __ Eor(x9, x0, Operand(x1, UXTX, 3));
+ __ Eor(w10, w0, Operand(w1, SXTB));
+ __ Eor(x11, x0, Operand(x1, SXTH, 1));
+ __ Eor(x12, x0, Operand(x1, SXTW, 2));
+ __ Eor(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x11111190, x6);
+ ASSERT_EQUAL_64(0x1111111111101013UL, x7);
+ ASSERT_EQUAL_64(0x11131315, x8);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x9);
+ ASSERT_EQUAL_64(0xeeeeee90, x10);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
+ ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
+ ASSERT_EQUAL_64(0x1111111511151519UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(eon) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xfff0);
+ __ Mov(x1, 0xf00000ff);
+
+ __ Eon(x2, x0, Operand(x1));
+ __ Eon(w3, w0, Operand(w1, LSL, 4));
+ __ Eon(x4, x0, Operand(x1, LSL, 4));
+ __ Eon(x5, x0, Operand(x1, LSR, 1));
+ __ Eon(w6, w0, Operand(w1, ASR, 20));
+ __ Eon(x7, x0, Operand(x1, ASR, 20));
+ __ Eon(w8, w0, Operand(w1, ROR, 28));
+ __ Eon(x9, x0, Operand(x1, ROR, 28));
+ __ Eon(w10, w0, Operand(0x03c003c0));
+ __ Eon(x11, x0, Operand(0x0000100000001000L));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
+ ASSERT_EQUAL_64(0xffff0fff, x3);
+ ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
+ ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
+ ASSERT_EQUAL_64(0x0000ff0f, x6);
+ ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
+ ASSERT_EQUAL_64(0xffff0ff0, x8);
+ ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
+ ASSERT_EQUAL_64(0xfc3f03cf, x10);
+ ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(eon_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x1111111111111111UL);
+ __ Mov(x1, 0x8000000080008081UL);
+ __ Eon(w6, w0, Operand(w1, UXTB));
+ __ Eon(x7, x0, Operand(x1, UXTH, 1));
+ __ Eon(w8, w0, Operand(w1, UXTW, 2));
+ __ Eon(x9, x0, Operand(x1, UXTX, 3));
+ __ Eon(w10, w0, Operand(w1, SXTB));
+ __ Eon(x11, x0, Operand(x1, SXTH, 1));
+ __ Eon(x12, x0, Operand(x1, SXTW, 2));
+ __ Eon(x13, x0, Operand(x1, SXTX, 3));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xeeeeee6f, x6);
+ ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
+ ASSERT_EQUAL_64(0xeeececea, x8);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
+ ASSERT_EQUAL_64(0x1111116f, x10);
+ ASSERT_EQUAL_64(0x111111111111efecUL, x11);
+ ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
+ ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(mul) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Mul(w0, w16, w16);
+ __ Mul(w1, w16, w17);
+ __ Mul(w2, w17, w18);
+ __ Mul(w3, w18, w19);
+ __ Mul(x4, x16, x16);
+ __ Mul(x5, x17, x18);
+ __ Mul(x6, x18, x19);
+ __ Mul(x7, x19, x19);
+ __ Smull(x8, w17, w18);
+ __ Smull(x9, w18, w18);
+ __ Smull(x10, w19, w19);
+ __ Mneg(w11, w16, w16);
+ __ Mneg(w12, w16, w17);
+ __ Mneg(w13, w17, w18);
+ __ Mneg(w14, w18, w19);
+ __ Mneg(x20, x16, x16);
+ __ Mneg(x21, x17, x18);
+ __ Mneg(x22, x18, x19);
+ __ Mneg(x23, x19, x19);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(1, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(0xffffffff, x5);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
+ ASSERT_EQUAL_64(0xffffffff, x22);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
+
+ TEARDOWN();
+}
+
+
+static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
+ SETUP();
+ START();
+ __ Mov(w0, a);
+ __ Mov(w1, b);
+ __ Smull(x2, w0, w1);
+ END();
+ RUN();
+ ASSERT_EQUAL_64(expected, x2);
+ TEARDOWN();
+}
+
+
+TEST(smull) {
+ INIT_V8();
+ SmullHelper(0, 0, 0);
+ SmullHelper(1, 1, 1);
+ SmullHelper(-1, -1, 1);
+ SmullHelper(1, -1, -1);
+ SmullHelper(0xffffffff80000000, 0x80000000, 1);
+ SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
+}
+
+
+TEST(madd) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Madd(w0, w16, w16, w16);
+ __ Madd(w1, w16, w16, w17);
+ __ Madd(w2, w16, w16, w18);
+ __ Madd(w3, w16, w16, w19);
+ __ Madd(w4, w16, w17, w17);
+ __ Madd(w5, w17, w17, w18);
+ __ Madd(w6, w17, w17, w19);
+ __ Madd(w7, w17, w18, w16);
+ __ Madd(w8, w17, w18, w18);
+ __ Madd(w9, w18, w18, w17);
+ __ Madd(w10, w18, w19, w18);
+ __ Madd(w11, w19, w19, w19);
+
+ __ Madd(x12, x16, x16, x16);
+ __ Madd(x13, x16, x16, x17);
+ __ Madd(x14, x16, x16, x18);
+ __ Madd(x15, x16, x16, x19);
+ __ Madd(x20, x16, x17, x17);
+ __ Madd(x21, x17, x17, x18);
+ __ Madd(x22, x17, x17, x19);
+ __ Madd(x23, x17, x18, x16);
+ __ Madd(x24, x17, x18, x18);
+ __ Madd(x25, x18, x18, x17);
+ __ Madd(x26, x18, x19, x18);
+ __ Madd(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(0xffffffff, x7);
+ ASSERT_EQUAL_64(0xfffffffe, x8);
+ ASSERT_EQUAL_64(2, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffff, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0x100000000UL, x21);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0xffffffff, x23);
+ ASSERT_EQUAL_64(0x1fffffffe, x24);
+ ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(msub) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+
+ __ Msub(w0, w16, w16, w16);
+ __ Msub(w1, w16, w16, w17);
+ __ Msub(w2, w16, w16, w18);
+ __ Msub(w3, w16, w16, w19);
+ __ Msub(w4, w16, w17, w17);
+ __ Msub(w5, w17, w17, w18);
+ __ Msub(w6, w17, w17, w19);
+ __ Msub(w7, w17, w18, w16);
+ __ Msub(w8, w17, w18, w18);
+ __ Msub(w9, w18, w18, w17);
+ __ Msub(w10, w18, w19, w18);
+ __ Msub(w11, w19, w19, w19);
+
+ __ Msub(x12, x16, x16, x16);
+ __ Msub(x13, x16, x16, x17);
+ __ Msub(x14, x16, x16, x18);
+ __ Msub(x15, x16, x16, x19);
+ __ Msub(x20, x16, x17, x17);
+ __ Msub(x21, x17, x17, x18);
+ __ Msub(x22, x17, x17, x19);
+ __ Msub(x23, x17, x18, x16);
+ __ Msub(x24, x17, x18, x18);
+ __ Msub(x25, x18, x18, x17);
+ __ Msub(x26, x18, x19, x18);
+ __ Msub(x27, x19, x19, x19);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(0xffffffff, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0xfffffffe, x5);
+ ASSERT_EQUAL_64(0xfffffffe, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0, x9);
+ ASSERT_EQUAL_64(0xfffffffe, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+
+ ASSERT_EQUAL_64(0, x12);
+ ASSERT_EQUAL_64(1, x13);
+ ASSERT_EQUAL_64(0xffffffff, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
+ ASSERT_EQUAL_64(1, x20);
+ ASSERT_EQUAL_64(0xfffffffeUL, x21);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x200000000UL, x25);
+ ASSERT_EQUAL_64(0x1fffffffeUL, x26);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(smulh) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x20, 0);
+ __ Mov(x21, 1);
+ __ Mov(x22, 0x0000000100000000L);
+ __ Mov(x23, 0x12345678);
+ __ Mov(x24, 0x0123456789abcdefL);
+ __ Mov(x25, 0x0000000200000000L);
+ __ Mov(x26, 0x8000000000000000UL);
+ __ Mov(x27, 0xffffffffffffffffUL);
+ __ Mov(x28, 0x5555555555555555UL);
+ __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
+
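+  // Smulh yields the upper 64 bits of the signed 128-bit product. For example,
+  // 0x8000000000000000 squared is 2^126, whose upper half is
+  // 0x4000000000000000 (checked against x6 below).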
+ __ Smulh(x0, x20, x24);
+ __ Smulh(x1, x21, x24);
+ __ Smulh(x2, x22, x23);
+ __ Smulh(x3, x22, x24);
+ __ Smulh(x4, x24, x25);
+ __ Smulh(x5, x23, x27);
+ __ Smulh(x6, x26, x26);
+ __ Smulh(x7, x26, x27);
+ __ Smulh(x8, x27, x27);
+ __ Smulh(x9, x28, x28);
+ __ Smulh(x10, x28, x29);
+ __ Smulh(x11, x29, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0x01234567, x3);
+ ASSERT_EQUAL_64(0x02468acf, x4);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
+ ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
+ ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(smaddl_umaddl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
+ __ Smaddl(x9, w17, w18, x20);
+ __ Smaddl(x10, w18, w18, x20);
+ __ Smaddl(x11, w19, w19, x20);
+ __ Smaddl(x12, w19, w19, x21);
+ __ Umaddl(x13, w17, w18, x20);
+ __ Umaddl(x14, w18, w18, x20);
+ __ Umaddl(x15, w19, w19, x20);
+ __ Umaddl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(3, x9);
+ ASSERT_EQUAL_64(5, x10);
+ ASSERT_EQUAL_64(5, x11);
+ ASSERT_EQUAL_64(0x200000001UL, x12);
+ ASSERT_EQUAL_64(0x100000003UL, x13);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
+ ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
+ ASSERT_EQUAL_64(0x1, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(smsubl_umsubl) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x17, 1);
+ __ Mov(x18, 0xffffffff);
+ __ Mov(x19, 0xffffffffffffffffUL);
+ __ Mov(x20, 4);
+ __ Mov(x21, 0x200000000UL);
+
+ __ Smsubl(x9, w17, w18, x20);
+ __ Smsubl(x10, w18, w18, x20);
+ __ Smsubl(x11, w19, w19, x20);
+ __ Smsubl(x12, w19, w19, x21);
+ __ Umsubl(x13, w17, w18, x20);
+ __ Umsubl(x14, w18, w18, x20);
+ __ Umsubl(x15, w19, w19, x20);
+ __ Umsubl(x22, w19, w19, x21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(5, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(3, x11);
+ ASSERT_EQUAL_64(0x1ffffffffUL, x12);
+ ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
+ ASSERT_EQUAL_64(0x200000003UL, x14);
+ ASSERT_EQUAL_64(0x200000003UL, x15);
+ ASSERT_EQUAL_64(0x3ffffffffUL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(div) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 1);
+ __ Mov(x17, 0xffffffff);
+ __ Mov(x18, 0xffffffffffffffffUL);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+ __ Mov(x21, 2);
+
+ __ Udiv(w0, w16, w16);
+ __ Udiv(w1, w17, w16);
+ __ Sdiv(w2, w16, w16);
+ __ Sdiv(w3, w16, w17);
+ __ Sdiv(w4, w17, w18);
+
+ __ Udiv(x5, x16, x16);
+ __ Udiv(x6, x17, x18);
+ __ Sdiv(x7, x16, x16);
+ __ Sdiv(x8, x16, x17);
+ __ Sdiv(x9, x17, x18);
+
+ __ Udiv(w10, w19, w21);
+ __ Sdiv(w11, w19, w21);
+ __ Udiv(x12, x19, x21);
+ __ Sdiv(x13, x19, x21);
+ __ Udiv(x14, x20, x21);
+ __ Sdiv(x15, x20, x21);
+
+ __ Udiv(w22, w19, w17);
+ __ Sdiv(w23, w19, w17);
+ __ Udiv(x24, x20, x18);
+ __ Sdiv(x25, x20, x18);
+
+ __ Udiv(x26, x16, x21);
+ __ Sdiv(x27, x16, x21);
+ __ Udiv(x28, x18, x21);
+ __ Sdiv(x29, x18, x21);
+
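+  // AArch64 integer division by zero does not trap; it yields zero, so the
+  // four results below are expected to be 0.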
+ __ Mov(x17, 0);
+ __ Udiv(w18, w16, w17);
+ __ Sdiv(w19, w16, w17);
+ __ Udiv(x20, x16, x17);
+ __ Sdiv(x21, x16, x17);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0xffffffff, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(1, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(1, x7);
+ ASSERT_EQUAL_64(0, x8);
+ ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
+ ASSERT_EQUAL_64(0x40000000, x10);
+ ASSERT_EQUAL_64(0xC0000000, x11);
+ ASSERT_EQUAL_64(0x40000000, x12);
+ ASSERT_EQUAL_64(0x40000000, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+ ASSERT_EQUAL_64(0xC000000000000000UL, x15);
+ ASSERT_EQUAL_64(0, x22);
+ ASSERT_EQUAL_64(0x80000000, x23);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0, x27);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
+ ASSERT_EQUAL_64(0, x29);
+ ASSERT_EQUAL_64(0, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0, x20);
+ ASSERT_EQUAL_64(0, x21);
+
+ TEARDOWN();
+}
+
+
+TEST(rbit_rev) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0xfedcba9876543210UL);
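+  // Rbit reverses the bit order of the source. Rev16 and Rev32 reverse the
+  // byte order within each half-word and word, while Rev reverses the bytes of
+  // the whole register.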
+ __ Rbit(w0, w24);
+ __ Rbit(x1, x24);
+ __ Rev16(w2, w24);
+ __ Rev16(x3, x24);
+ __ Rev(w4, w24);
+ __ Rev32(x5, x24);
+ __ Rev(x6, x24);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x084c2a6e, x0);
+ ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+ ASSERT_EQUAL_64(0x54761032, x2);
+ ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
+ ASSERT_EQUAL_64(0x10325476, x4);
+ ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
+ ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
+
+ TEARDOWN();
+}
+
+
+TEST(clz_cls) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x24, 0x0008000000800000UL);
+ __ Mov(x25, 0xff800000fff80000UL);
+ __ Mov(x26, 0);
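+  // Clz counts leading zero bits; Cls counts the leading bits that match the
+  // sign bit, not including the sign bit itself.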
+ __ Clz(w0, w24);
+ __ Clz(x1, x24);
+ __ Clz(w2, w25);
+ __ Clz(x3, x25);
+ __ Clz(w4, w26);
+ __ Clz(x5, x26);
+ __ Cls(w6, w24);
+ __ Cls(x7, x24);
+ __ Cls(w8, w25);
+ __ Cls(x9, x25);
+ __ Cls(w10, w26);
+ __ Cls(x11, x26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(8, x0);
+ ASSERT_EQUAL_64(12, x1);
+ ASSERT_EQUAL_64(0, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(32, x4);
+ ASSERT_EQUAL_64(64, x5);
+ ASSERT_EQUAL_64(7, x6);
+ ASSERT_EQUAL_64(11, x7);
+ ASSERT_EQUAL_64(12, x8);
+ ASSERT_EQUAL_64(8, x9);
+ ASSERT_EQUAL_64(31, x10);
+ ASSERT_EQUAL_64(63, x11);
+
+ TEARDOWN();
+}
+
+
+TEST(label) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x0);
+ __ Mov(x22, lr); // Save lr.
+
+ __ B(&label_1);
+ __ B(&label_1);
+ __ B(&label_1); // Multiple branches to the same label.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_2);
+ __ B(&label_3); // Forward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_1);
+ __ B(&label_2); // Backward branch.
+ __ Mov(x0, 0x0);
+ __ Bind(&label_3);
+ __ Bl(&label_4);
+ END();
+
+ __ Bind(&label_4);
+ __ Mov(x1, 0x1);
+ __ Mov(lr, x22);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_at_start) {
+ INIT_V8();
+ SETUP();
+
+ Label good, exit;
+
+ // Test that branches can exist at the start of the buffer. (This is a
+ // boundary condition in the label-handling code.) To achieve this, we have
+ // to work around the code generated by START.
+ RESET();
+ __ B(&good);
+
+ START_AFTER_RESET();
+ __ Mov(x0, 0x0);
+ END();
+
+ __ Bind(&exit);
+ START_AFTER_RESET();
+ __ Mov(x0, 0x1);
+ END();
+
+ __ Bind(&good);
+ __ B(&exit);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+ TEARDOWN();
+}
+
+
+TEST(adr) {
+ INIT_V8();
+ SETUP();
+
+ Label label_1, label_2, label_3, label_4;
+
+ START();
+  __ Mov(x0, 0x0);        // Will be set to non-zero to indicate failure.
+  __ Adr(x1, &label_3);   // Will be cleared to zero to indicate success.
+
+ __ Adr(x2, &label_1); // Multiple forward references to the same label.
+ __ Adr(x3, &label_1);
+ __ Adr(x4, &label_1);
+
+ __ Bind(&label_2);
+  __ Eor(x5, x2, Operand(x3));  // Ensure that x2, x3 and x4 are identical.
+ __ Eor(x6, x2, Operand(x4));
+ __ Orr(x0, x0, Operand(x5));
+ __ Orr(x0, x0, Operand(x6));
+ __ Br(x2); // label_1, label_3
+
+ __ Bind(&label_3);
+ __ Adr(x2, &label_3); // Self-reference (offset 0).
+ __ Eor(x1, x1, Operand(x2));
+ __ Adr(x2, &label_4); // Simple forward reference.
+ __ Br(x2); // label_4
+
+ __ Bind(&label_1);
+ __ Adr(x2, &label_3); // Multiple reverse references to the same label.
+ __ Adr(x3, &label_3);
+ __ Adr(x4, &label_3);
+ __ Adr(x5, &label_2); // Simple reverse reference.
+ __ Br(x5); // label_2
+
+ __ Bind(&label_4);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0, x0);
+ ASSERT_EQUAL_64(0x0, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_cond) {
+ INIT_V8();
+ SETUP();
+
+ Label wrong;
+
+ START();
+ __ Mov(x0, 0x1);
+ __ Mov(x1, 0x1);
+ __ Mov(x2, 0x8000000000000000L);
+
+  // For each 'cmp' instruction below, the conditions used to branch to 'wrong'
+  // must not be taken; any other condition code would take the branch.
+
+ __ Cmp(x1, 0);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, ls);
+ __ B(&wrong, lt);
+ __ B(&wrong, le);
+ Label ok_1;
+ __ B(&ok_1, ne);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_1);
+
+ __ Cmp(x1, 1);
+ __ B(&wrong, ne);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, lt);
+ __ B(&wrong, gt);
+ Label ok_2;
+ __ B(&ok_2, pl);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_2);
+
+ __ Cmp(x1, 2);
+ __ B(&wrong, eq);
+ __ B(&wrong, hs);
+ __ B(&wrong, pl);
+ __ B(&wrong, vs);
+ __ B(&wrong, hi);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_3;
+ __ B(&ok_3, vc);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_3);
+
+ __ Cmp(x2, 1);
+ __ B(&wrong, eq);
+ __ B(&wrong, lo);
+ __ B(&wrong, mi);
+ __ B(&wrong, vc);
+ __ B(&wrong, ls);
+ __ B(&wrong, ge);
+ __ B(&wrong, gt);
+ Label ok_4;
+ __ B(&ok_4, le);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_4);
+
+ Label ok_5;
+ __ b(&ok_5, al);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_5);
+
+ Label ok_6;
+ __ b(&ok_6, nv);
+ __ Mov(x0, 0x0);
+ __ Bind(&ok_6);
+
+ END();
+
+ __ Bind(&wrong);
+ __ Mov(x0, 0x0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_to_reg) {
+ INIT_V8();
+ SETUP();
+
+ // Test br.
+ Label fn1, after_fn1;
+
+ START();
+ __ Mov(x29, lr);
+
+ __ Mov(x1, 0);
+ __ B(&after_fn1);
+
+ __ Bind(&fn1);
+ __ Mov(x0, lr);
+ __ Mov(x1, 42);
+ __ Br(x0);
+
+ __ Bind(&after_fn1);
+ __ Bl(&fn1);
+
+ // Test blr.
+ Label fn2, after_fn2;
+
+ __ Mov(x2, 0);
+ __ B(&after_fn2);
+
+ __ Bind(&fn2);
+ __ Mov(x0, lr);
+ __ Mov(x2, 84);
+ __ Blr(x0);
+
+ __ Bind(&after_fn2);
+ __ Bl(&fn2);
+ __ Mov(x3, lr);
+
+ __ Mov(lr, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+ ASSERT_EQUAL_64(42, x1);
+ ASSERT_EQUAL_64(84, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(compare_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x4, 0);
+ __ Mov(x5, 0);
+ __ Mov(x16, 0);
+ __ Mov(x17, 42);
+
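+  // Cbz/Cbnz branch if the register is zero/non-zero. The W-register forms
+  // only consider the low 32 bits (exercised with x18 below).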
+ Label zt, zt_end;
+ __ Cbz(w16, &zt);
+ __ B(&zt_end);
+ __ Bind(&zt);
+ __ Mov(x0, 1);
+ __ Bind(&zt_end);
+
+ Label zf, zf_end;
+ __ Cbz(x17, &zf);
+ __ B(&zf_end);
+ __ Bind(&zf);
+ __ Mov(x1, 1);
+ __ Bind(&zf_end);
+
+ Label nzt, nzt_end;
+ __ Cbnz(w17, &nzt);
+ __ B(&nzt_end);
+ __ Bind(&nzt);
+ __ Mov(x2, 1);
+ __ Bind(&nzt_end);
+
+ Label nzf, nzf_end;
+ __ Cbnz(x16, &nzf);
+ __ B(&nzf_end);
+ __ Bind(&nzf);
+ __ Mov(x3, 1);
+ __ Bind(&nzf_end);
+
+ __ Mov(x18, 0xffffffff00000000UL);
+
+ Label a, a_end;
+ __ Cbz(w18, &a);
+ __ B(&a_end);
+ __ Bind(&a);
+ __ Mov(x4, 1);
+ __ Bind(&a_end);
+
+ Label b, b_end;
+ __ Cbnz(w18, &b);
+ __ B(&b_end);
+ __ Bind(&b);
+ __ Mov(x5, 1);
+ __ Bind(&b_end);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(1, x4);
+ ASSERT_EQUAL_64(0, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(test_branch) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+ __ Mov(x3, 0);
+ __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
+
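+  // Tbz/Tbnz branch if the specified bit of the register is clear/set. In
+  // 0xaaaa...aa the odd-numbered bits are set and the even-numbered bits are
+  // clear.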
+ Label bz, bz_end;
+ __ Tbz(w16, 0, &bz);
+ __ B(&bz_end);
+ __ Bind(&bz);
+ __ Mov(x0, 1);
+ __ Bind(&bz_end);
+
+ Label bo, bo_end;
+ __ Tbz(x16, 63, &bo);
+ __ B(&bo_end);
+ __ Bind(&bo);
+ __ Mov(x1, 1);
+ __ Bind(&bo_end);
+
+ Label nbz, nbz_end;
+ __ Tbnz(x16, 61, &nbz);
+ __ B(&nbz_end);
+ __ Bind(&nbz);
+ __ Mov(x2, 1);
+ __ Bind(&nbz_end);
+
+ Label nbo, nbo_end;
+ __ Tbnz(w16, 2, &nbo);
+ __ B(&nbo_end);
+ __ Bind(&nbo);
+ __ Mov(x3, 1);
+ __ Bind(&nbo_end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_backward) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly resolves backward branches to labels
+ // that are outside the immediate range of branch instructions.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&test_tbz);
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches below.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of the test (especially if tracing is active).
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+  // For each out-of-range branch instruction, at least two instructions should
+ // have been generated.
+ CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_simple_veneer) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+ // to labels that are outside the immediate range of branch instructions.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ // Generate enough code to overflow the immediate range of the three types of
+  // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of the test (especially if tracing is active).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_link_chain) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+  // that target out-of-range labels and are part of a chain of several
+  // branches to the same label.
+ //
+  // We test three situations, each with a different type of instruction:
+ // (1)- When the branch is at the start of the chain with tbz.
+ // (2)- When the branch is in the middle of the chain with cbz.
+ // (3)- When the branch is at the end of the chain with bcond.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&skip);
+ // Branches at the start of the chain for situations (2) and (3).
+ __ B(&success_cbz);
+ __ B(&success_bcond);
+ __ Nop();
+ __ B(&success_bcond);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ skip.Unuse();
+ __ B(&skip);
+ // Branches at the end of the chain for situations (1) and (2).
+ __ B(&success_cbz);
+ __ B(&success_tbz);
+ __ Nop();
+ __ B(&success_tbz);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ // Generate enough code to overflow the immediate range of the three types of
+  // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+      // before reaching the end of the test (especially if tracing is active).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_broken_link_chain) {
+ INIT_V8();
+
+  // Check that the MacroAssembler correctly handles the case where removing a
+  // branch from the link chain of a label leaves two links on either side that
+  // cannot be linked together (out of range).
+ //
+ // We test with tbz because it has a small range.
+ int max_range = Instruction::ImmBranchRange(TestBranchType);
+ int inter_range = max_range / 2 + max_range / 10;
+
+ SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_1, test_2, test_3;
+ Label far_target;
+
+ __ Mov(x0, 0); // Indicates the origin of the branch.
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ // First instruction in the label chain.
+ __ Bind(&test_1);
+ __ Mov(x0, 1);
+ __ B(&far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+  // Will need a veneer to reach the target.
+ __ Bind(&test_2);
+ __ Mov(x0, 2);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ // Does not need a veneer to reach the target, but the initial branch
+ // instruction is out of range.
+ __ Bind(&test_3);
+ __ Mov(x0, 3);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Allow generating veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ __ B(&fail);
+
+ __ Bind(&far_target);
+ __ Cmp(x0, 1);
+ __ B(eq, &test_2);
+ __ Cmp(x0, 2);
+ __ B(eq, &test_3);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x3, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(branch_type) {
+ INIT_V8();
+
+ SETUP();
+
+ Label fail, done;
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x10, 0x7);
+ __ Mov(x11, 0x0);
+
+  // Test branches that are not taken.
+ __ Cmp(x10, 0x7);
+ __ B(&fail, ne);
+ __ B(&fail, never);
+ __ B(&fail, reg_zero, x10);
+ __ B(&fail, reg_not_zero, x11);
+ __ B(&fail, reg_bit_clear, x10, 0);
+ __ B(&fail, reg_bit_set, x10, 3);
+
+ // Test taken branches.
+ Label l1, l2, l3, l4, l5;
+ __ Cmp(x10, 0x7);
+ __ B(&l1, eq);
+ __ B(&fail);
+ __ Bind(&l1);
+ __ B(&l2, always);
+ __ B(&fail);
+ __ Bind(&l2);
+ __ B(&l3, reg_not_zero, x10);
+ __ B(&fail);
+ __ Bind(&l3);
+ __ B(&l4, reg_bit_clear, x10, 15);
+ __ B(&fail);
+ __ Bind(&l4);
+ __ B(&l5, reg_bit_set, x10, 1);
+ __ B(&fail);
+ __ Bind(&l5);
+
+ __ B(&done);
+
+ __ Bind(&fail);
+ __ Mov(x0, 0x1);
+
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Ldr(w0, MemOperand(x17));
+ __ Str(w0, MemOperand(x18));
+ __ Ldr(w1, MemOperand(x17, 4));
+ __ Str(w1, MemOperand(x18, 12));
+ __ Ldr(x2, MemOperand(x17, 8));
+ __ Str(x2, MemOperand(x18, 16));
+ __ Ldrb(w3, MemOperand(x17, 1));
+ __ Strb(w3, MemOperand(x18, 25));
+ __ Ldrh(w4, MemOperand(x17, 2));
+ __ Strh(w4, MemOperand(x18, 33));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x76543210, x0);
+ ASSERT_EQUAL_64(0x76543210, dst[0]);
+ ASSERT_EQUAL_64(0xfedcba98, x1);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x7654, x4);
+ ASSERT_EQUAL_64(0x765400, dst[4]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base, x18);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_wide) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[8192];
+ uint32_t dst[8192];
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+ memset(src, 0xaa, 8192 * sizeof(src[0]));
+ memset(dst, 0xaa, 8192 * sizeof(dst[0]));
+ src[0] = 0;
+ src[6144] = 6144;
+ src[8191] = 8191;
+
+ START();
+ __ Mov(x22, src_base);
+ __ Mov(x23, dst_base);
+ __ Mov(x24, src_base);
+ __ Mov(x25, dst_base);
+ __ Mov(x26, src_base);
+ __ Mov(x27, dst_base);
+
+ __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
+ __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
+ __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
+ __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
+ __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
+ __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(8191, w0);
+ ASSERT_EQUAL_32(8191, dst[8191]);
+ ASSERT_EQUAL_64(src_base, x22);
+ ASSERT_EQUAL_64(dst_base, x23);
+ ASSERT_EQUAL_32(0, w1);
+ ASSERT_EQUAL_32(0, dst[0]);
+ ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
+ ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
+ ASSERT_EQUAL_32(6144, w2);
+ ASSERT_EQUAL_32(6144, dst[6144]);
+ ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
+ ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base + 16);
+ __ Mov(x22, dst_base + 40);
+ __ Mov(x23, src_base);
+ __ Mov(x24, dst_base);
+ __ Mov(x25, src_base);
+ __ Mov(x26, dst_base);
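+  // Pre-indexed addressing applies the offset and writes the updated address
+  // back to the base register before the access is performed.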
+ __ Ldr(w0, MemOperand(x17, 4, PreIndex));
+ __ Str(w0, MemOperand(x18, 12, PreIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PreIndex));
+ __ Str(x1, MemOperand(x20, 16, PreIndex));
+ __ Ldr(w2, MemOperand(x21, -4, PreIndex));
+ __ Str(w2, MemOperand(x22, -4, PreIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
+ __ Strb(w3, MemOperand(x24, 25, PreIndex));
+ __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
+ __ Strh(w4, MemOperand(x26, 41, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x01234567, x2);
+ ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ ASSERT_EQUAL_64(src_base + 4, x17);
+ ASSERT_EQUAL_64(dst_base + 12, x18);
+ ASSERT_EQUAL_64(src_base + 8, x19);
+ ASSERT_EQUAL_64(dst_base + 16, x20);
+ ASSERT_EQUAL_64(src_base + 12, x21);
+ ASSERT_EQUAL_64(dst_base + 36, x22);
+ ASSERT_EQUAL_64(src_base + 1, x23);
+ ASSERT_EQUAL_64(dst_base + 25, x24);
+ ASSERT_EQUAL_64(src_base + 3, x25);
+ ASSERT_EQUAL_64(dst_base + 41, x26);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_str_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
+ uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base + 4);
+ __ Mov(x18, dst_base + 12);
+ __ Mov(x19, src_base + 8);
+ __ Mov(x20, dst_base + 16);
+ __ Mov(x21, src_base + 8);
+ __ Mov(x22, dst_base + 32);
+ __ Mov(x23, src_base + 1);
+ __ Mov(x24, dst_base + 25);
+ __ Mov(x25, src_base + 3);
+ __ Mov(x26, dst_base + 41);
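+  // Post-indexed addressing performs the access at the unmodified base address
+  // and then adds the offset to the base register.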
+ __ Ldr(w0, MemOperand(x17, 4, PostIndex));
+ __ Str(w0, MemOperand(x18, 12, PostIndex));
+ __ Ldr(x1, MemOperand(x19, 8, PostIndex));
+ __ Str(x1, MemOperand(x20, 16, PostIndex));
+ __ Ldr(x2, MemOperand(x21, -8, PostIndex));
+ __ Str(x2, MemOperand(x22, -32, PostIndex));
+ __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
+ __ Strb(w3, MemOperand(x24, 5, PostIndex));
+ __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
+ __ Strh(w4, MemOperand(x26, -41, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfedcba98, x0);
+ ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+ ASSERT_EQUAL_64(0x32, x3);
+ ASSERT_EQUAL_64(0x3200, dst[3]);
+ ASSERT_EQUAL_64(0x9876, x4);
+ ASSERT_EQUAL_64(0x987600, dst[5]);
+ ASSERT_EQUAL_64(src_base + 8, x17);
+ ASSERT_EQUAL_64(dst_base + 24, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+ ASSERT_EQUAL_64(src_base, x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+ ASSERT_EQUAL_64(src_base + 2, x23);
+ ASSERT_EQUAL_64(dst_base + 30, x24);
+ ASSERT_EQUAL_64(src_base, x25);
+ ASSERT_EQUAL_64(dst_base, x26);
+
+ TEARDOWN();
+}
+
+
+TEST(load_signed) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80008080, 0x7fff7f7f};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
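+  // Ldrsb, Ldrsh and Ldrsw sign-extend the loaded byte, half-word or word into
+  // the destination register.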
+ __ Ldrsb(w0, MemOperand(x24));
+ __ Ldrsb(w1, MemOperand(x24, 4));
+ __ Ldrsh(w2, MemOperand(x24));
+ __ Ldrsh(w3, MemOperand(x24, 4));
+ __ Ldrsb(x4, MemOperand(x24));
+ __ Ldrsb(x5, MemOperand(x24, 4));
+ __ Ldrsh(x6, MemOperand(x24));
+ __ Ldrsh(x7, MemOperand(x24, 4));
+ __ Ldrsw(x8, MemOperand(x24));
+ __ Ldrsw(x9, MemOperand(x24, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffff80, x0);
+ ASSERT_EQUAL_64(0x0000007f, x1);
+ ASSERT_EQUAL_64(0xffff8080, x2);
+ ASSERT_EQUAL_64(0x00007f7f, x3);
+ ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
+ ASSERT_EQUAL_64(0x000000000000007fUL, x5);
+ ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
+ ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
+ ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
+ ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_regoffset) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[3] = {1, 2, 3};
+ uint32_t dst[4] = {0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 3 * sizeof(src[0]));
+ __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
+ __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
+ __ Mov(x24, 0);
+ __ Mov(x25, 4);
+ __ Mov(x26, -4);
+ __ Mov(x27, 0xfffffffc); // 32-bit -4.
+ __ Mov(x28, 0xfffffffe); // 32-bit -2.
+ __ Mov(x29, 0xffffffff); // 32-bit -1.
+
+ __ Ldr(w0, MemOperand(x16, x24));
+ __ Ldr(x1, MemOperand(x16, x25));
+ __ Ldr(w2, MemOperand(x18, x26));
+ __ Ldr(w3, MemOperand(x18, x27, SXTW));
+ __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+ __ Str(w0, MemOperand(x17, x24));
+ __ Str(x1, MemOperand(x17, x25));
+ __ Str(w2, MemOperand(x20, x29, SXTW, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(0x0000000300000002UL, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(3, x3);
+ ASSERT_EQUAL_64(2, x4);
+ ASSERT_EQUAL_32(1, dst[0]);
+ ASSERT_EQUAL_32(2, dst[1]);
+ ASSERT_EQUAL_32(3, dst[2]);
+ ASSERT_EQUAL_32(3, dst[3]);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[3] = {1.0, 2.0, 3.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
+ __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(s2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(2.0, dst[0]);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_FP32(3.0, s2);
+ ASSERT_EQUAL_FP32(3.0, dst[1]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(load_store_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[3] = {1.0, 2.0, 3.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base);
+ __ Mov(x20, dst_base);
+ __ Mov(x21, src_base);
+ __ Mov(x22, dst_base);
+ __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
+ __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
+ __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
+ __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
+ __ Str(d2, MemOperand(x22, sizeof(dst[0])));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(2.0, dst[0]);
+ ASSERT_EQUAL_FP64(1.0, d1);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(3.0, dst[1]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
+ ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+ ASSERT_EQUAL_64(dst_base, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_float) {
+ INIT_V8();
+ SETUP();
+
+ float src[2] = {1.0, 2.0};
+ float dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s31);
+ ASSERT_EQUAL_FP32(2.0, s0);
+ ASSERT_EQUAL_FP32(0.0, dst[0]);
+ ASSERT_EQUAL_FP32(2.0, dst[1]);
+ ASSERT_EQUAL_FP32(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_double) {
+ INIT_V8();
+ SETUP();
+
+ double src[2] = {1.0, 2.0};
+ double dst[3] = {0.0, 0.0, 0.0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
+ __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0, d31);
+ ASSERT_EQUAL_FP64(2.0, d0);
+ ASSERT_EQUAL_FP64(0.0, dst[0]);
+ ASSERT_EQUAL_FP64(2.0, dst[1]);
+ ASSERT_EQUAL_FP64(1.0, dst[2]);
+ ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+ ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
+ __ Ldp(w0, w1, MemOperand(x16));
+ __ Ldp(w2, w3, MemOperand(x16, 4));
+ __ Ldp(x4, x5, MemOperand(x16, 8));
+ __ Ldp(w6, w7, MemOperand(x18, -12));
+ __ Ldp(x8, x9, MemOperand(x18, -16));
+ __ Stp(w0, w1, MemOperand(x17));
+ __ Stp(w2, w3, MemOperand(x17, 8));
+ __ Stp(x4, x5, MemOperand(x17, 16));
+ __ Stp(w6, w7, MemOperand(x19, -24));
+ __ Stp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(ldnp_stnp_offset) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, src_base + 24);
+ __ Mov(x19, dst_base + 56);
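+ // Ldnp/Stnp are the non-temporal variants of Ldp/Stp; apart from the cache
+ // hint they use the same immediate-offset addressing.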
+ __ Ldnp(w0, w1, MemOperand(x16));
+ __ Ldnp(w2, w3, MemOperand(x16, 4));
+ __ Ldnp(x4, x5, MemOperand(x16, 8));
+ __ Ldnp(w6, w7, MemOperand(x18, -12));
+ __ Ldnp(x8, x9, MemOperand(x18, -16));
+ __ Stnp(w0, w1, MemOperand(x17));
+ __ Stnp(w2, w3, MemOperand(x17, 8));
+ __ Stnp(x4, x5, MemOperand(x17, 16));
+ __ Stnp(w6, w7, MemOperand(x19, -24));
+ __ Stnp(x8, x9, MemOperand(x19, -16));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+ ASSERT_EQUAL_64(0x8899aabb, x6);
+ ASSERT_EQUAL_64(0xbbaa9988, x7);
+ ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(src_base + 24, x18);
+ ASSERT_EQUAL_64(dst_base + 56, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_preindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00112233, x0);
+ ASSERT_EQUAL_64(0xccddeeff, x1);
+ ASSERT_EQUAL_64(0x44556677, x2);
+ ASSERT_EQUAL_64(0x00112233, x3);
+ ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x6);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_stp_postindex) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
+ 0xffeeddccbbaa9988UL, 0x7766554433221100UL};
+ uint64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x16, src_base);
+ __ Mov(x17, dst_base);
+ __ Mov(x18, dst_base + 16);
+ __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
+ __ Mov(x19, x16);
+ __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
+ __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
+ __ Mov(x20, x17);
+ __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
+ __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
+ __ Mov(x21, x16);
+ __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
+ __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
+ __ Mov(x22, x18);
+ __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x44556677, x0);
+ ASSERT_EQUAL_64(0x00112233, x1);
+ ASSERT_EQUAL_64(0x00112233, x2);
+ ASSERT_EQUAL_64(0xccddeeff, x3);
+ ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
+ ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, x4);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+ ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+ ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+ ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
+ ASSERT_EQUAL_64(src_base, x16);
+ ASSERT_EQUAL_64(dst_base, x17);
+ ASSERT_EQUAL_64(dst_base + 16, x18);
+ ASSERT_EQUAL_64(src_base + 4, x19);
+ ASSERT_EQUAL_64(dst_base + 4, x20);
+ ASSERT_EQUAL_64(src_base + 8, x21);
+ ASSERT_EQUAL_64(dst_base + 24, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(ldp_sign_extend) {
+ INIT_V8();
+ SETUP();
+
+ uint32_t src[2] = {0x80000000, 0x7fffffff};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+
+ START();
+ __ Mov(x24, src_base);
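+ // Ldpsw loads two 32-bit words and sign-extends each into its 64-bit
+ // destination register.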
+ __ Ldpsw(x0, x1, MemOperand(x24));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
+ ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(ldur_stur) {
+ INIT_V8();
+ SETUP();
+
+ int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
+ int64_t dst[5] = {0, 0, 0, 0, 0};
+ uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+ uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+
+ START();
+ __ Mov(x17, src_base);
+ __ Mov(x18, dst_base);
+ __ Mov(x19, src_base + 16);
+ __ Mov(x20, dst_base + 32);
+ __ Mov(x21, dst_base + 40);
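+ // The offsets below are either negative or not multiples of the access size,
+ // so they can only be encoded with the unscaled-offset (ldur/stur) forms.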
+ __ Ldr(w0, MemOperand(x17, 1));
+ __ Str(w0, MemOperand(x18, 2));
+ __ Ldr(x1, MemOperand(x17, 3));
+ __ Str(x1, MemOperand(x18, 9));
+ __ Ldr(w2, MemOperand(x19, -9));
+ __ Str(w2, MemOperand(x20, -5));
+ __ Ldrb(w3, MemOperand(x19, -1));
+ __ Strb(w3, MemOperand(x21, -1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x6789abcd, x0);
+ ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
+ ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
+ ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
+ ASSERT_EQUAL_64(0x000000ab, dst[2]);
+ ASSERT_EQUAL_64(0xabcdef01, x2);
+ ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
+ ASSERT_EQUAL_64(0x00000001, x3);
+ ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
+ ASSERT_EQUAL_64(src_base, x17);
+ ASSERT_EQUAL_64(dst_base, x18);
+ ASSERT_EQUAL_64(src_base + 16, x19);
+ ASSERT_EQUAL_64(dst_base + 32, x20);
+
+ TEARDOWN();
+}
+
+
+#if 0 // TODO(all) enable.
+// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
+TEST(ldr_literal) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Ldr(x2, 0x1234567890abcdefUL);
+ __ Ldr(w3, 0xfedcba09);
+ __ Ldr(d13, 1.234);
+ __ Ldr(s25, 2.5);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
+ ASSERT_EQUAL_64(0xfedcba09, x3);
+ ASSERT_EQUAL_FP64(1.234, d13);
+ ASSERT_EQUAL_FP32(2.5, s25);
+
+ TEARDOWN();
+}
+
+
+static void LdrLiteralRangeHelper(ptrdiff_t range_,
+ LiteralPoolEmitOption option,
+ bool expect_dump) {
+ ASSERT(range_ > 0);
+ SETUP_SIZE(range_ + 1024);
+
+ Label label_1, label_2;
+
+ size_t range = static_cast<size_t>(range_);
+ size_t code_size = 0;
+ size_t pool_guard_size;
+
+ if (option == NoJumpRequired) {
+ // Space for an explicit branch.
+ pool_guard_size = sizeof(Instr);
+ } else {
+ pool_guard_size = 0;
+ }
+
+ START();
+ // Force a pool dump so the pool starts off empty.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ __ Ldr(x0, 0x1234567890abcdefUL);
+ __ Ldr(w1, 0xfedcba09);
+ __ Ldr(d0, 1.234);
+ __ Ldr(s1, 2.5);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ code_size += 4 * sizeof(Instr);
+
+ // Check that the requested range (allowing space for a branch over the pool)
+ // can be handled by this test.
+ ASSERT((code_size + pool_guard_size) <= range);
+
+ // Emit NOPs up to 'range', leaving space for the pool guard.
+ while ((code_size + pool_guard_size) < range) {
+ __ Nop();
+ code_size += sizeof(Instr);
+ }
+
+ // Emit the guard sequence before the literal pool.
+ if (option == NoJumpRequired) {
+ __ B(&label_1);
+ code_size += sizeof(Instr);
+ }
+
+ ASSERT(code_size == range);
+ ASSERT_LITERAL_POOL_SIZE(4);
+
+ // Possibly generate a literal pool.
+ __ CheckLiteralPool(option);
+ __ Bind(&label_1);
+ if (expect_dump) {
+ ASSERT_LITERAL_POOL_SIZE(0);
+ } else {
+ ASSERT_LITERAL_POOL_SIZE(4);
+ }
+
+ // Force a pool flush to check that a second pool functions correctly.
+ __ EmitLiteralPool(JumpRequired);
+ ASSERT_LITERAL_POOL_SIZE(0);
+
+ // These loads should be after the pool (and will require a new one).
+ __ Ldr(x4, 0x34567890abcdef12UL);
+ __ Ldr(w5, 0xdcba09fe);
+ __ Ldr(d4, 123.4);
+ __ Ldr(s5, 250.0);
+ ASSERT_LITERAL_POOL_SIZE(4);
+ END();
+
+ RUN();
+
+ // Check that the literals loaded correctly.
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
+ ASSERT_EQUAL_64(0xfedcba09, x1);
+ ASSERT_EQUAL_FP64(1.234, d0);
+ ASSERT_EQUAL_FP32(2.5, s1);
+ ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
+ ASSERT_EQUAL_64(0xdcba09fe, x5);
+ ASSERT_EQUAL_FP64(123.4, d4);
+ ASSERT_EQUAL_FP32(250.0, s5);
+
+ TEARDOWN();
+}
+
+
+TEST(ldr_literal_range_1) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
+ NoJumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_2) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
+ NoJumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_3) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
+ JumpRequired,
+ true);
+}
+
+
+TEST(ldr_literal_range_4) {
+ INIT_V8();
+ LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_5) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
+ JumpRequired,
+ false);
+}
+
+
+TEST(ldr_literal_range_6) {
+ INIT_V8();
+ LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
+ JumpRequired,
+ false);
+}
+#endif
+
+TEST(add_sub_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1111);
+ __ Mov(x2, 0xffffffffffffffffL);
+ __ Mov(x3, 0x8000000000000000L);
+
+ __ Add(x10, x0, Operand(0x123));
+ __ Add(x11, x1, Operand(0x122000));
+ __ Add(x12, x0, Operand(0xabc << 12));
+ __ Add(x13, x2, Operand(1));
+
+ __ Add(w14, w0, Operand(0x123));
+ __ Add(w15, w1, Operand(0x122000));
+ __ Add(w16, w0, Operand(0xabc << 12));
+ __ Add(w17, w2, Operand(1));
+
+ __ Sub(x20, x0, Operand(0x1));
+ __ Sub(x21, x1, Operand(0x111));
+ __ Sub(x22, x1, Operand(0x1 << 12));
+ __ Sub(x23, x3, Operand(1));
+
+ __ Sub(w24, w0, Operand(0x1));
+ __ Sub(w25, w1, Operand(0x111));
+ __ Sub(w26, w1, Operand(0x1 << 12));
+ __ Sub(w27, w3, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x123, x10);
+ ASSERT_EQUAL_64(0x123111, x11);
+ ASSERT_EQUAL_64(0xabc000, x12);
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_32(0x123, w14);
+ ASSERT_EQUAL_32(0x123111, w15);
+ ASSERT_EQUAL_32(0xabc000, w16);
+ ASSERT_EQUAL_32(0x0, w17);
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
+ ASSERT_EQUAL_64(0x1000, x21);
+ ASSERT_EQUAL_64(0x111, x22);
+ ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
+
+ ASSERT_EQUAL_32(0xffffffff, w24);
+ ASSERT_EQUAL_32(0x1000, w25);
+ ASSERT_EQUAL_32(0x111, w26);
+ ASSERT_EQUAL_32(0xffffffff, w27);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0x0);
+ __ Mov(x1, 0x1);
+
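+ // These immediates are too wide for the add/sub immediate encoding, so the
+ // macro assembler is expected to materialize them (e.g. in a scratch
+ // register) before performing the operation.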
+ __ Add(x10, x0, Operand(0x1234567890abcdefUL));
+ __ Add(x11, x1, Operand(0xffffffff));
+
+ __ Add(w12, w0, Operand(0x12345678));
+ __ Add(w13, w1, Operand(0xffffffff));
+
+ __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+
+ __ Sub(w21, w0, Operand(0x12345678));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
+ ASSERT_EQUAL_64(0x100000000UL, x11);
+
+ ASSERT_EQUAL_32(0x12345678, w12);
+ ASSERT_EQUAL_64(0x0, x13);
+
+ ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+
+ ASSERT_EQUAL_32(-0x12345678, w21);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_shifted) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(x3, 0xffffffffffffffffL);
+
+ __ Add(x10, x1, Operand(x2));
+ __ Add(x11, x0, Operand(x1, LSL, 8));
+ __ Add(x12, x0, Operand(x1, LSR, 8));
+ __ Add(x13, x0, Operand(x1, ASR, 8));
+ __ Add(x14, x0, Operand(x2, ASR, 8));
+ __ Add(w15, w0, Operand(w1, ASR, 8));
+ __ Add(w18, w3, Operand(w1, ROR, 8));
+ __ Add(x19, x3, Operand(x1, ROR, 8));
+
+ __ Sub(x20, x3, Operand(x2));
+ __ Sub(x21, x3, Operand(x1, LSL, 8));
+ __ Sub(x22, x3, Operand(x1, LSR, 8));
+ __ Sub(x23, x3, Operand(x1, ASR, 8));
+ __ Sub(x24, x3, Operand(x2, ASR, 8));
+ __ Sub(w25, w3, Operand(w1, ASR, 8));
+ __ Sub(w26, w3, Operand(w1, ROR, 8));
+ __ Sub(x27, x3, Operand(x1, ROR, 8));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+ ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x12);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x13);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
+ ASSERT_EQUAL_64(0xff89abcd, x15);
+ ASSERT_EQUAL_64(0xef89abcc, x18);
+ ASSERT_EQUAL_64(0xef0123456789abccL, x19);
+
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
+ ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
+ ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
+ ASSERT_EQUAL_64(0x000123456789abcdL, x24);
+ ASSERT_EQUAL_64(0x00765432, x25);
+ ASSERT_EQUAL_64(0x10765432, x26);
+ ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_extended) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+ __ Mov(w3, 0x80);
+
+ __ Add(x10, x0, Operand(x1, UXTB, 0));
+ __ Add(x11, x0, Operand(x1, UXTB, 1));
+ __ Add(x12, x0, Operand(x1, UXTH, 2));
+ __ Add(x13, x0, Operand(x1, UXTW, 4));
+
+ __ Add(x14, x0, Operand(x1, SXTB, 0));
+ __ Add(x15, x0, Operand(x1, SXTB, 1));
+ __ Add(x16, x0, Operand(x1, SXTH, 2));
+ __ Add(x17, x0, Operand(x1, SXTW, 3));
+ __ Add(x18, x0, Operand(x2, SXTB, 0));
+ __ Add(x19, x0, Operand(x2, SXTB, 1));
+ __ Add(x20, x0, Operand(x2, SXTH, 2));
+ __ Add(x21, x0, Operand(x2, SXTW, 3));
+
+ __ Add(x22, x1, Operand(x2, SXTB, 1));
+ __ Sub(x23, x1, Operand(x2, SXTB, 1));
+
+ __ Add(w24, w1, Operand(w2, UXTB, 2));
+ __ Add(w25, w0, Operand(w1, SXTB, 0));
+ __ Add(w26, w0, Operand(w1, SXTB, 1));
+ __ Add(w27, w2, Operand(w1, SXTW, 3));
+
+ __ Add(w28, w0, Operand(w1, SXTW, 3));
+ __ Add(x29, x0, Operand(w1, SXTW, 3));
+
+ __ Sub(x30, x0, Operand(w3, SXTB, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xefL, x10);
+ ASSERT_EQUAL_64(0x1deL, x11);
+ ASSERT_EQUAL_64(0x337bcL, x12);
+ ASSERT_EQUAL_64(0x89abcdef0L, x13);
+
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
+ ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
+ ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+ ASSERT_EQUAL_64(0x10L, x18);
+ ASSERT_EQUAL_64(0x20L, x19);
+ ASSERT_EQUAL_64(0xc840L, x20);
+ ASSERT_EQUAL_64(0x3b2a19080L, x21);
+
+ ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
+ ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
+
+ ASSERT_EQUAL_32(0x89abce2f, w24);
+ ASSERT_EQUAL_32(0xffffffef, w25);
+ ASSERT_EQUAL_32(0xffffffde, w26);
+ ASSERT_EQUAL_32(0xc3b2a188, w27);
+
+ ASSERT_EQUAL_32(0x4d5e6f78, w28);
+ ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+
+ ASSERT_EQUAL_64(256, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_negative) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 4687);
+ __ Mov(x2, 0x1122334455667788);
+ __ Mov(w3, 0x11223344);
+ __ Mov(w4, 400000);
+
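+ // The macro assembler is expected to turn an Add of a negative immediate into
+ // a Sub of its absolute value, and vice versa.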
+ __ Add(x10, x0, -42);
+ __ Add(x11, x1, -687);
+ __ Add(x12, x2, -0x88);
+
+ __ Sub(x13, x0, -600);
+ __ Sub(x14, x1, -313);
+ __ Sub(x15, x2, -0x555);
+
+ __ Add(w19, w3, -0x344);
+ __ Add(w20, w4, -2000);
+
+ __ Sub(w21, w3, -0xbc);
+ __ Sub(w22, w4, -2000);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(-42, x10);
+ ASSERT_EQUAL_64(4000, x11);
+ ASSERT_EQUAL_64(0x1122334455667700, x12);
+
+ ASSERT_EQUAL_64(600, x13);
+ ASSERT_EQUAL_64(5000, x14);
+ ASSERT_EQUAL_64(0x1122334455667cdd, x15);
+
+ ASSERT_EQUAL_32(0x11223000, w19);
+ ASSERT_EQUAL_32(398000, w20);
+
+ ASSERT_EQUAL_32(0x11223400, w21);
+ ASSERT_EQUAL_32(402000, w22);
+
+ TEARDOWN();
+}
+
+
+TEST(add_sub_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0);
+ __ Mov(x2, 0);
+
+ Label blob1;
+ __ Bind(&blob1);
+ __ Add(x0, x0, 0);
+ __ Sub(x1, x1, 0);
+ __ Sub(x2, x2, xzr);
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));
+
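+ // Adding zero to a W register is not a no-op: writing the W destination
+ // clears the upper 32 bits of the X register, so code must be generated.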
+ Label blob2;
+ __ Bind(&blob2);
+ __ Add(w3, w3, 0);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));
+
+ Label blob3;
+ __ Bind(&blob3);
+ __ Sub(w3, w3, wzr);
+ CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(0, x1);
+ ASSERT_EQUAL_64(0, x2);
+
+ TEARDOWN();
+}
+
+
+TEST(claim_drop_zero) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label start;
+ __ Bind(&start);
+ __ Claim(0);
+ __ Drop(0);
+ __ Claim(xzr, 8);
+ __ Drop(xzr, 8);
+ __ Claim(xzr, 0);
+ __ Drop(xzr, 0);
+ __ Claim(x7, 0);
+ __ Drop(x7, 0);
+ __ ClaimBySMI(xzr, 8);
+ __ DropBySMI(xzr, 8);
+ __ ClaimBySMI(xzr, 0);
+ __ DropBySMI(xzr, 0);
+ CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+TEST(neg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0xf123456789abcdefL);
+
+ // Immediate.
+ __ Neg(x1, 0x123);
+ __ Neg(w2, 0x123);
+
+ // Shifted.
+ __ Neg(x3, Operand(x0, LSL, 1));
+ __ Neg(w4, Operand(w0, LSL, 2));
+ __ Neg(x5, Operand(x0, LSR, 3));
+ __ Neg(w6, Operand(w0, LSR, 4));
+ __ Neg(x7, Operand(x0, ASR, 5));
+ __ Neg(w8, Operand(w0, ASR, 6));
+
+ // Extended.
+ __ Neg(w9, Operand(w0, UXTB));
+ __ Neg(x10, Operand(x0, SXTB, 1));
+ __ Neg(w11, Operand(w0, UXTH, 2));
+ __ Neg(x12, Operand(x0, SXTH, 3));
+ __ Neg(w13, Operand(w0, UXTW, 4));
+ __ Neg(x14, Operand(x0, SXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
+ ASSERT_EQUAL_64(0xfffffedd, x2);
+ ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
+ ASSERT_EQUAL_64(0xd950c844, x4);
+ ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
+ ASSERT_EQUAL_64(0xf7654322, x6);
+ ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
+ ASSERT_EQUAL_64(0x01d950c9, x8);
+ ASSERT_EQUAL_64(0xffffff11, x9);
+ ASSERT_EQUAL_64(0x0000000000000022UL, x10);
+ ASSERT_EQUAL_64(0xfffcc844, x11);
+ ASSERT_EQUAL_64(0x0000000000019088UL, x12);
+ ASSERT_EQUAL_64(0x65432110, x13);
+ ASSERT_EQUAL_64(0x0000000765432110UL, x14);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_shift) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+ __ Mov(x3, 0xfedcba9876543210L);
+ __ Mov(x4, 0xffffffffffffffffL);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x5, x2, Operand(x3));
+ __ Adc(x6, x0, Operand(x1, LSL, 60));
+ __ Sbc(x7, x4, Operand(x3, LSR, 4));
+ __ Adc(x8, x2, Operand(x3, ASR, 4));
+ __ Adc(x9, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w10, w2, Operand(w3));
+ __ Adc(w11, w0, Operand(w1, LSL, 30));
+ __ Sbc(w12, w4, Operand(w3, LSR, 4));
+ __ Adc(w13, w2, Operand(w3, ASR, 4));
+ __ Adc(w14, w2, Operand(w3, ROR, 8));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x2, Operand(x3));
+ __ Adc(x19, x0, Operand(x1, LSL, 60));
+ __ Sbc(x20, x4, Operand(x3, LSR, 4));
+ __ Adc(x21, x2, Operand(x3, ASR, 4));
+ __ Adc(x22, x2, Operand(x3, ROR, 8));
+
+ __ Adc(w23, w2, Operand(w3));
+ __ Adc(w24, w0, Operand(w1, LSL, 30));
+ __ Sbc(w25, w4, Operand(w3, LSR, 4));
+ __ Adc(w26, w2, Operand(w3, ASR, 4));
+ __ Adc(w27, w2, Operand(w3, ROR, 8));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
+ ASSERT_EQUAL_64(1L << 60, x6);
+ ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
+ ASSERT_EQUAL_64(0x0111111111111110L, x8);
+ ASSERT_EQUAL_64(0x1222222222222221L, x9);
+
+ ASSERT_EQUAL_32(0xffffffff, w10);
+ ASSERT_EQUAL_32(1 << 30, w11);
+ ASSERT_EQUAL_32(0xf89abcdd, w12);
+ ASSERT_EQUAL_32(0x91111110, w13);
+ ASSERT_EQUAL_32(0x9a222221, w14);
+
+ ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
+ ASSERT_EQUAL_64((1L << 60) + 1, x19);
+ ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
+ ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
+ ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
+
+ ASSERT_EQUAL_32(0xffffffff + 1, w23);
+ ASSERT_EQUAL_32((1 << 30) + 1, w24);
+ ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
+ ASSERT_EQUAL_32(0x91111110 + 1, w26);
+ ASSERT_EQUAL_32(0x9a222221 + 1, w27);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x8000000000000000L);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, ASR, 63));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 0x10);
+ __ Mov(x1, 0x07ffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, LSL, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ // Check that sbc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(x1, LSR, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZFlag);
+ ASSERT_EQUAL_64(0, x10);
+
+ START();
+ __ Mov(w0, 0x7fffffff);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(w10, w0);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x80000000, x10);
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000000L, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Sbcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+
+ START();
+ __ Mov(x0, 0);
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngcs(x10, 0x7fffffffffffffffL);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+ ASSERT_EQUAL_64(0x8000000000000001L, x10);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x2, 0x0123456789abcdefL);
+
+ __ Adc(x10, x1, Operand(w2, UXTB, 1));
+ __ Adc(x11, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x12, x1, Operand(w2, UXTW, 4));
+ __ Adc(x13, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w14, w1, Operand(w2, UXTB, 1));
+ __ Adc(w15, w1, Operand(w2, SXTH, 2));
+ __ Adc(w9, w1, Operand(w2, UXTW, 4));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x20, x1, Operand(w2, UXTB, 1));
+ __ Adc(x21, x1, Operand(x2, SXTH, 2));
+ __ Sbc(x22, x1, Operand(w2, UXTW, 4));
+ __ Adc(x23, x1, Operand(x2, UXTX, 4));
+
+ __ Adc(w24, w1, Operand(w2, UXTB, 1));
+ __ Adc(w25, w1, Operand(w2, SXTH, 2));
+ __ Adc(w26, w1, Operand(w2, UXTW, 4));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1df, x10);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
+ ASSERT_EQUAL_64(0xfffffff765432110L, x12);
+ ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
+
+ ASSERT_EQUAL_32(0x1df, w14);
+ ASSERT_EQUAL_32(0xffff37bd, w15);
+ ASSERT_EQUAL_32(0x9abcdef1, w9);
+
+ ASSERT_EQUAL_64(0x1df + 1, x20);
+ ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
+ ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
+ ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
+
+ ASSERT_EQUAL_32(0x1df + 1, w24);
+ ASSERT_EQUAL_32(0xffff37bd + 1, w25);
+ ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
+
+ // Check that adc correctly sets the condition flags.
+ START();
+ __ Mov(x0, 0xff);
+ __ Mov(x1, 0xffffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, SXTX, 1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ __ Mov(x1, 1);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(x1, UXTB, 2));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 0x7fffffffffffffffL);
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Adcs(x10, x0, Operand(1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(adc_sbc_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+
+ __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w8, w0, Operand(0xffffffff));
+ __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w10, w0, Operand(0xffffffff));
+ __ Ngc(x11, Operand(0xffffffff00000000UL));
+ __ Ngc(w12, Operand(0xffff0000));
+
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+
+ __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
+ __ Adc(w19, w0, Operand(0xffffffff));
+ __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
+ __ Sbc(w21, w0, Operand(0xffffffff));
+ __ Ngc(x22, Operand(0xffffffff00000000UL));
+ __ Ngc(w23, Operand(0xffff0000));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
+ ASSERT_EQUAL_64(0xffffffff, x8);
+ ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0xffff, x12);
+
+ ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
+ ASSERT_EQUAL_64(1, x21);
+ ASSERT_EQUAL_64(0x100000000UL, x22);
+ ASSERT_EQUAL_64(0x10000, x23);
+
+ TEARDOWN();
+}
+
+
+TEST(flags) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Neg(x10, Operand(x0));
+ __ Neg(x11, Operand(x1));
+ __ Neg(w12, Operand(w1));
+ // Clear the C flag.
+ __ Adds(x0, x0, Operand(0));
+ __ Ngc(x13, Operand(x0));
+ // Set the C flag.
+ __ Cmp(x0, Operand(x0));
+ __ Ngc(w14, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(-0x1111111111111111L, x11);
+ ASSERT_EQUAL_32(-0x11111111, w12);
+ ASSERT_EQUAL_64(-1L, x13);
+ ASSERT_EQUAL_32(0, w14);
+
+ START();
+ __ Mov(x0, 0);
+ __ Cmp(x0, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Cmp(w0, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(x0, 0);
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x0, Operand(x1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(x1, 0x1111111111111111L);
+ __ Cmp(x1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(w1, 0x11111111);
+ __ Cmp(w1, Operand(0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(CFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0x7fffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0x7fffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NVFlag);
+
+ START();
+ __ Mov(x0, 1);
+ __ Mov(x1, 0xffffffffffffffffL);
+ __ Cmn(x1, Operand(x0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 1);
+ __ Mov(w1, 0xffffffff);
+ __ Cmn(w1, Operand(w0));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 1);
+ // Clear the C flag.
+ __ Adds(w0, w0, Operand(0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(NFlag);
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0);
+ // Set the C flag.
+ __ Cmp(w0, Operand(w0));
+ __ Ngcs(w0, Operand(w1));
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_NZCV(ZCFlag);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_shift) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0xf0000000);
+ __ Mov(x19, 0xf000000010000000UL);
+ __ Mov(x20, 0xf0000000f0000000UL);
+ __ Mov(x21, 0x7800000078000000UL);
+ __ Mov(x22, 0x3c0000003c000000UL);
+ __ Mov(x23, 0x8000000780000000UL);
+ __ Mov(x24, 0x0000000f00000000UL);
+ __ Mov(x25, 0x00000003c0000000UL);
+ __ Mov(x26, 0x8000000780000000UL);
+ __ Mov(x27, 0xc0000003);
+
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, Operand(x22, LSL, 2));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w19, Operand(w23, LSR, 3));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x18, Operand(x24, LSR, 4));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w25, ASR, 2));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, Operand(x26, ASR, 3));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(w27, Operand(w22, ROR, 28));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, Operand(x21, ROR, 31));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(ZCFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(ZCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(cmp_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+ __ Mov(x25, 0xffff);
+ __ Mov(x26, 0xffffffff);
+
+ __ Cmp(w20, Operand(w21, LSL, 1));
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x22, Operand(x23, SXTB, 0));
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x24, Operand(x23, SXTB, 1));
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x24, Operand(x23, UXTB, 1));
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w22, Operand(w25, UXTH));
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x22, Operand(x25, SXTH));
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x22, Operand(x26, UXTW));
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x24, Operand(x26, SXTW, 1));
+ __ Mrs(x7, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NCFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w7);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w16, 0);
+ __ Mov(w17, 1);
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w16, w16);
+ __ Ccmp(w16, w17, NCFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x16, x16);
+ __ Ccmn(x16, 2, NZCVFlag, ne);
+ __ Mrs(x3, NZCV);
+
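+ // The al and nv conditions are only reachable through the raw assembler form;
+ // the Ccmp macro is expected to reject them.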
+ __ ccmp(x16, x16, NZCVFlag, al);
+ __ Mrs(x4, NZCV);
+
+ __ ccmp(x16, x16, NZCVFlag, nv);
+ __ Mrs(x5, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NCFlag, w1);
+ ASSERT_EQUAL_32(NoFlag, w2);
+ ASSERT_EQUAL_32(NZCVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_wide_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0);
+
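+ // Neither immediate fits ccmp's five-bit field, so the macro assembler has to
+ // handle them another way (for example via a scratch register).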
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(NFlag, w0);
+ ASSERT_EQUAL_32(NoFlag, w1);
+
+ TEARDOWN();
+}
+
+
+TEST(ccmp_shift_extend) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w20, 0x2);
+ __ Mov(w21, 0x1);
+ __ Mov(x22, 0xffffffffffffffffUL);
+ __ Mov(x23, 0xff);
+ __ Mov(x24, 0xfffffffffffffffeUL);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(w20, Operand(w20));
+ __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
+ __ Mrs(x4, NZCV);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(ZCFlag, w1);
+ ASSERT_EQUAL_32(ZCFlag, w2);
+ ASSERT_EQUAL_32(NCFlag, w3);
+ ASSERT_EQUAL_32(NZCVFlag, w4);
+
+ TEARDOWN();
+}
+
+
+TEST(csel) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Mov(x24, 0x0000000f0000000fUL);
+ __ Mov(x25, 0x0000001f0000001fUL);
+ __ Mov(x26, 0);
+ __ Mov(x27, 0);
+
+ __ Cmp(w16, 0);
+ __ Csel(w0, w24, w25, eq);
+ __ Csel(w1, w24, w25, ne);
+ __ Csinc(w2, w24, w25, mi);
+ __ Csinc(w3, w24, w25, pl);
+
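+ // The raw assembler forms are used here because the Csel macro is expected to
+ // reject the al and nv conditions.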
+ __ csel(w13, w24, w25, al);
+ __ csel(x14, x24, x25, nv);
+
+ __ Cmp(x16, 1);
+ __ Csinv(x4, x24, x25, gt);
+ __ Csinv(x5, x24, x25, le);
+ __ Csneg(x6, x24, x25, hs);
+ __ Csneg(x7, x24, x25, lo);
+
+ __ Cset(w8, ne);
+ __ Csetm(w9, ne);
+ __ Cinc(x10, x25, ne);
+ __ Cinv(x11, x24, ne);
+ __ Cneg(x12, x24, ne);
+
+ __ csel(w15, w24, w25, al);
+ __ csel(x18, x24, x25, nv);
+
+ __ CzeroX(x24, ne);
+ __ CzeroX(x25, eq);
+
+ __ CmovX(x26, x25, ne);
+ __ CmovX(x27, x25, eq);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x0000000f, x0);
+ ASSERT_EQUAL_64(0x0000001f, x1);
+ ASSERT_EQUAL_64(0x00000020, x2);
+ ASSERT_EQUAL_64(0x0000000f, x3);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
+ ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
+ ASSERT_EQUAL_64(0x00000001, x8);
+ ASSERT_EQUAL_64(0xffffffff, x9);
+ ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
+ ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
+ ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
+ ASSERT_EQUAL_64(0x0000000f, x13);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
+ ASSERT_EQUAL_64(0x0000000f, x15);
+ ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
+ ASSERT_EQUAL_64(0, x24);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
+ ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
+ ASSERT_EQUAL_64(0, x27);
+
+ TEARDOWN();
+}
+
+
+TEST(csel_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x18, 0);
+ __ Mov(x19, 0x80000000);
+ __ Mov(x20, 0x8000000000000000UL);
+
+ __ Cmp(x18, Operand(0));
+ __ Csel(w0, w19, -2, ne);
+ __ Csel(w1, w19, -1, ne);
+ __ Csel(w2, w19, 0, ne);
+ __ Csel(w3, w19, 1, ne);
+ __ Csel(w4, w19, 2, ne);
+ __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
+ __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
+ __ Csel(w7, w19, 3, eq);
+
+ __ Csel(x8, x20, -2, ne);
+ __ Csel(x9, x20, -1, ne);
+ __ Csel(x10, x20, 0, ne);
+ __ Csel(x11, x20, 1, ne);
+ __ Csel(x12, x20, 2, ne);
+ __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
+ __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
+ __ Csel(x15, x20, 3, eq);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(-2, w0);
+ ASSERT_EQUAL_32(-1, w1);
+ ASSERT_EQUAL_32(0, w2);
+ ASSERT_EQUAL_32(1, w3);
+ ASSERT_EQUAL_32(2, w4);
+ ASSERT_EQUAL_32(-1, w5);
+ ASSERT_EQUAL_32(0x40000000, w6);
+ ASSERT_EQUAL_32(0x80000000, w7);
+
+ ASSERT_EQUAL_64(-2, x8);
+ ASSERT_EQUAL_64(-1, x9);
+ ASSERT_EQUAL_64(0, x10);
+ ASSERT_EQUAL_64(1, x11);
+ ASSERT_EQUAL_64(2, x12);
+ ASSERT_EQUAL_64(-1, x13);
+ ASSERT_EQUAL_64(0x4000000000000000UL, x14);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x15);
+
+ TEARDOWN();
+}
+
+
+TEST(lslv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
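+ // A shift amount of xzr (zero) should leave the value unchanged; the raw lslv
+ // form is used to exercise the instruction directly.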
+ __ lslv(x0, x0, xzr);
+
+ __ Lsl(x16, x0, x1);
+ __ Lsl(x17, x0, x2);
+ __ Lsl(x18, x0, x3);
+ __ Lsl(x19, x0, x4);
+ __ Lsl(x20, x0, x5);
+ __ Lsl(x21, x0, x6);
+
+ __ Lsl(w22, w0, w1);
+ __ Lsl(w23, w0, w2);
+ __ Lsl(w24, w0, w3);
+ __ Lsl(w25, w0, w4);
+ __ Lsl(w26, w0, w5);
+ __ Lsl(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
+ ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(lsrv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ lsrv(x0, x0, xzr);
+
+ __ Lsr(x16, x0, x1);
+ __ Lsr(x17, x0, x2);
+ __ Lsr(x18, x0, x3);
+ __ Lsr(x19, x0, x4);
+ __ Lsr(x20, x0, x5);
+ __ Lsr(x21, x0, x6);
+
+ __ Lsr(w22, w0, w1);
+ __ Lsr(w23, w0, w2);
+ __ Lsr(w24, w0, w3);
+ __ Lsr(w25, w0, w4);
+ __ Lsr(w26, w0, w5);
+ __ Lsr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ value &= 0xffffffffUL;
+ ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(asrv) {
+ INIT_V8();
+ SETUP();
+
+ int64_t value = 0xfedcba98fedcba98UL;
+ int shift[] = {1, 3, 5, 9, 17, 33};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ asrv(x0, x0, xzr);
+
+ __ Asr(x16, x0, x1);
+ __ Asr(x17, x0, x2);
+ __ Asr(x18, x0, x3);
+ __ Asr(x19, x0, x4);
+ __ Asr(x20, x0, x5);
+ __ Asr(x21, x0, x6);
+
+ __ Asr(w22, w0, w1);
+ __ Asr(w23, w0, w2);
+ __ Asr(w24, w0, w3);
+ __ Asr(w25, w0, w4);
+ __ Asr(w26, w0, w5);
+ __ Asr(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
+ ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
+ ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
+ ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
+ ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
+ ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+
+ int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
+ ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
+ ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
+ ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
+ ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
+ ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
+ ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
+
+ TEARDOWN();
+}
+
+
+TEST(rorv) {
+ INIT_V8();
+ SETUP();
+
+ uint64_t value = 0x0123456789abcdefUL;
+ int shift[] = {4, 8, 12, 16, 24, 36};
+
+ START();
+ __ Mov(x0, value);
+ __ Mov(w1, shift[0]);
+ __ Mov(w2, shift[1]);
+ __ Mov(w3, shift[2]);
+ __ Mov(w4, shift[3]);
+ __ Mov(w5, shift[4]);
+ __ Mov(w6, shift[5]);
+
+ __ rorv(x0, x0, xzr);
+
+ __ Ror(x16, x0, x1);
+ __ Ror(x17, x0, x2);
+ __ Ror(x18, x0, x3);
+ __ Ror(x19, x0, x4);
+ __ Ror(x20, x0, x5);
+ __ Ror(x21, x0, x6);
+
+ __ Ror(w22, w0, w1);
+ __ Ror(w23, w0, w2);
+ __ Ror(w24, w0, w3);
+ __ Ror(w25, w0, w4);
+ __ Ror(w26, w0, w5);
+ __ Ror(w27, w0, w6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(value, x0);
+ ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
+ ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
+ ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
+ ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
+ ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
+ ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
+ ASSERT_EQUAL_32(0xf89abcde, w22);
+ ASSERT_EQUAL_32(0xef89abcd, w23);
+ ASSERT_EQUAL_32(0xdef89abc, w24);
+ ASSERT_EQUAL_32(0xcdef89ab, w25);
+ ASSERT_EQUAL_32(0xabcdef89, w26);
+ ASSERT_EQUAL_32(0xf89abcde, w27);
+
+ TEARDOWN();
+}
+
+
+TEST(bfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+ __ Mov(x12, 0x8888888888888888L);
+ __ Mov(x13, 0x8888888888888888L);
+ __ Mov(w20, 0x88888888);
+ __ Mov(w21, 0x88888888);
+
+ __ bfm(x10, x1, 16, 31);
+ __ bfm(x11, x1, 32, 15);
+
+ __ bfm(w20, w1, 16, 23);
+ __ bfm(w21, w1, 24, 15);
+
+ // Aliases.
+ __ Bfi(x12, x1, 16, 8);
+ __ Bfxil(x13, x1, 16, 8);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0x88888888888889abL, x10);
+ ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
+
+ ASSERT_EQUAL_32(0x888888ab, w20);
+ ASSERT_EQUAL_32(0x88cdef88, w21);
+
+ ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
+ ASSERT_EQUAL_64(0x88888888888888abL, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(sbfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ sbfm(x10, x1, 16, 31);
+ __ sbfm(x11, x1, 32, 15);
+ __ sbfm(x12, x1, 32, 47);
+ __ sbfm(x13, x1, 48, 35);
+
+ __ sbfm(w14, w1, 16, 23);
+ __ sbfm(w15, w1, 24, 15);
+ __ sbfm(w16, w2, 16, 23);
+ __ sbfm(w17, w2, 24, 15);
+
+ // Aliases.
+ __ Asr(x18, x1, 32);
+ __ Asr(x19, x2, 32);
+ __ Sbfiz(x20, x1, 8, 16);
+ __ Sbfiz(x21, x2, 8, 16);
+ __ Sbfx(x22, x1, 8, 16);
+ __ Sbfx(x23, x2, 8, 16);
+ __ Sxtb(x24, w1);
+ __ Sxtb(x25, x2);
+ __ Sxth(x26, w1);
+ __ Sxth(x27, x2);
+ __ Sxtw(x28, w1);
+ __ Sxtw(x29, x2);
+ END();
+
+ RUN();
+
+
+ ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
+ ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0xffffffab, w14);
+ ASSERT_EQUAL_32(0xffcdef00, w15);
+ ASSERT_EQUAL_32(0x54, w16);
+ ASSERT_EQUAL_32(0x00321000, w17);
+
+ ASSERT_EQUAL_64(0x01234567L, x18);
+ ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
+ ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
+ ASSERT_EQUAL_64(0x321000L, x21);
+ ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
+ ASSERT_EQUAL_64(0x5432L, x23);
+ ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
+ ASSERT_EQUAL_64(0x10, x25);
+ ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
+ ASSERT_EQUAL_64(0x3210, x27);
+ ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
+ ASSERT_EQUAL_64(0x76543210, x29);
+
+ TEARDOWN();
+}
+
+
+TEST(ubfm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ Mov(x10, 0x8888888888888888L);
+ __ Mov(x11, 0x8888888888888888L);
+
+ __ ubfm(x10, x1, 16, 31);
+ __ ubfm(x11, x1, 32, 15);
+ __ ubfm(x12, x1, 32, 47);
+ __ ubfm(x13, x1, 48, 35);
+
+ __ ubfm(w25, w1, 16, 23);
+ __ ubfm(w26, w1, 24, 15);
+ __ ubfm(w27, w2, 16, 23);
+ __ ubfm(w28, w2, 24, 15);
+
+ // Aliases.
+ __ Lsl(x15, x1, 63);
+ __ Lsl(x16, x1, 0);
+ __ Lsr(x17, x1, 32);
+ __ Ubfiz(x18, x1, 8, 16);
+ __ Ubfx(x19, x1, 8, 16);
+ __ Uxtb(x20, x1);
+ __ Uxth(x21, x1);
+ __ Uxtw(x22, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000000000089abL, x10);
+ ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
+ ASSERT_EQUAL_64(0x4567L, x12);
+ ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+
+ ASSERT_EQUAL_32(0x000000ab, w25);
+ ASSERT_EQUAL_32(0x00cdef00, w26);
+ ASSERT_EQUAL_32(0x54, w27);
+ ASSERT_EQUAL_32(0x00321000, w28);
+
+ ASSERT_EQUAL_64(0x8000000000000000L, x15);
+ ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
+ ASSERT_EQUAL_64(0x01234567L, x17);
+ ASSERT_EQUAL_64(0xcdef00L, x18);
+ ASSERT_EQUAL_64(0xabcdL, x19);
+ ASSERT_EQUAL_64(0xefL, x20);
+ ASSERT_EQUAL_64(0xcdefL, x21);
+ ASSERT_EQUAL_64(0x89abcdefL, x22);
+
+ TEARDOWN();
+}
+
+
+TEST(extr) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x1, 0x0123456789abcdefL);
+ __ Mov(x2, 0xfedcba9876543210L);
+
+ __ Extr(w10, w1, w2, 0);
+ __ Extr(w11, w1, w2, 1);
+ __ Extr(x12, x2, x1, 2);
+
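+ // Ror of a register by an immediate is an alias of Extr with both source
+ // registers the same.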
+ __ Ror(w13, w1, 0);
+ __ Ror(w14, w2, 17);
+ __ Ror(w15, w1, 31);
+ __ Ror(x18, x2, 1);
+ __ Ror(x19, x1, 63);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x76543210, x10);
+ ASSERT_EQUAL_64(0xbb2a1908, x11);
+ ASSERT_EQUAL_64(0x0048d159e26af37bUL, x12);
+ ASSERT_EQUAL_64(0x89abcdef, x13);
+ ASSERT_EQUAL_64(0x19083b2a, x14);
+ ASSERT_EQUAL_64(0x13579bdf, x15);
+ ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x18);
+ ASSERT_EQUAL_64(0x02468acf13579bdeUL, x19);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_imm) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s11, 1.0);
+ __ Fmov(d22, -13.0);
+ __ Fmov(s1, 255.0);
+ __ Fmov(d2, 12.34567);
+ __ Fmov(s3, 0.0);
+ __ Fmov(d4, 0.0);
+ __ Fmov(s5, kFP32PositiveInfinity);
+ __ Fmov(d6, kFP64NegativeInfinity);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s11);
+ ASSERT_EQUAL_FP64(-13.0, d22);
+ ASSERT_EQUAL_FP32(255.0, s1);
+ ASSERT_EQUAL_FP64(12.34567, d2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP64(0.0, d4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
+
+ TEARDOWN();
+}
+
+
+TEST(fmov_reg) {
+ INIT_V8();
+ SETUP();
+
+ START();
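+ // Fmov between a general-purpose register and an FP register copies the raw
+ // bit pattern; no conversion is performed.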
+ __ Fmov(s20, 1.0);
+ __ Fmov(w10, s20);
+ __ Fmov(s30, w10);
+ __ Fmov(s5, s20);
+ __ Fmov(d1, -13.0);
+ __ Fmov(x1, d1);
+ __ Fmov(d2, x1);
+ __ Fmov(d4, d1);
+ __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
+ __ Fmov(s6, s6);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
+ ASSERT_EQUAL_FP32(1.0, s30);
+ ASSERT_EQUAL_FP32(1.0, s5);
+ ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
+ ASSERT_EQUAL_FP64(-13.0, d2);
+ ASSERT_EQUAL_FP64(-13.0, d4);
+ ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
+
+ TEARDOWN();
+}
+
+
+TEST(fadd) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 1.0f);
+ __ Fmov(s19, 0.0f);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0.0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fadd(s0, s17, s18);
+ __ Fadd(s1, s18, s19);
+ __ Fadd(s2, s14, s18);
+ __ Fadd(s3, s15, s18);
+ __ Fadd(s4, s16, s18);
+ __ Fadd(s5, s15, s16);
+ __ Fadd(s6, s16, s15);
+
+ __ Fadd(d7, d30, d31);
+ __ Fadd(d8, d29, d31);
+ __ Fadd(d9, d26, d31);
+ __ Fadd(d10, d27, d31);
+ __ Fadd(d11, d28, d31);
+ __ Fadd(d12, d27, d28);
+ __ Fadd(d13, d28, d27);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(4.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(0.25, d7);
+ ASSERT_EQUAL_FP64(2.25, d8);
+ ASSERT_EQUAL_FP64(2.25, d9);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+ TEARDOWN();
+}
+
+
+TEST(fsub) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 1.0f);
+ __ Fmov(s19, 0.0f);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0.0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fsub(s0, s17, s18);
+ __ Fsub(s1, s18, s19);
+ __ Fsub(s2, s14, s18);
+ __ Fsub(s3, s18, s15);
+ __ Fsub(s4, s18, s16);
+ __ Fsub(s5, s15, s15);
+ __ Fsub(s6, s16, s16);
+
+ __ Fsub(d7, d30, d31);
+ __ Fsub(d8, d29, d31);
+ __ Fsub(d9, d26, d31);
+ __ Fsub(d10, d31, d27);
+ __ Fsub(d11, d31, d28);
+ __ Fsub(d12, d27, d27);
+ __ Fsub(d13, d28, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(2.25, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-1.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-4.25, d7);
+ ASSERT_EQUAL_FP64(-2.25, d8);
+ ASSERT_EQUAL_FP64(-2.25, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+ TEARDOWN();
+}
+
+
+TEST(fmul) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 2.0f);
+ __ Fmov(s19, 0.0f);
+ __ Fmov(s20, -2.0f);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0.0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fmul(s0, s17, s18);
+ __ Fmul(s1, s18, s19);
+ __ Fmul(s2, s14, s14);
+ __ Fmul(s3, s15, s20);
+ __ Fmul(s4, s16, s20);
+ __ Fmul(s5, s15, s19);
+ __ Fmul(s6, s19, s16);
+
+ __ Fmul(d7, d30, d31);
+ __ Fmul(d8, d29, d31);
+ __ Fmul(d9, d26, d26);
+ __ Fmul(d10, d27, d30);
+ __ Fmul(d11, d28, d30);
+ __ Fmul(d12, d27, d29);
+ __ Fmul(d13, d29, d28);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(6.5, s0);
+ ASSERT_EQUAL_FP32(0.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-4.5, d7);
+ ASSERT_EQUAL_FP64(0.0, d8);
+ ASSERT_EQUAL_FP64(0.0, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+ TEARDOWN();
+}
+
+
+static void FmaddFmsubHelper(double n, double m, double a,
+ double fmadd, double fmsub,
+ double fnmadd, double fnmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+ __ Fmadd(d28, d0, d1, d2);
+ __ Fmsub(d29, d0, d1, d2);
+ __ Fnmadd(d30, d0, d1, d2);
+ __ Fnmsub(d31, d0, d1, d2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(fmadd, d28);
+ ASSERT_EQUAL_FP64(fmsub, d29);
+ ASSERT_EQUAL_FP64(fnmadd, d30);
+ ASSERT_EQUAL_FP64(fnmsub, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_double) {
+ INIT_V8();
+
+ // It's hard to check the result of fused operations because the only way to
+ // calculate the result is to use fma, which is what the simulator uses anyway.
+ // TODO(jbramley): Add tests to check behaviour against a hardware trace.
+
+ // Basic operation.
+ FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
+ FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
+
+ // Check the sign of exact zeroes.
+ // n m a fmadd fmsub fnmadd fnmsub
+ FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
+ FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
+ FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
+ FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
+
+ // Check NaN generation.
+ FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
+ kFP64PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP64DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
+ kFP64DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP64PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
+ kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
+}
+
+
+static void FmaddFmsubHelper(float n, float m, float a,
+ float fmadd, float fmsub,
+ float fnmadd, float fnmsub) {
+ SETUP();
+ START();
+
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+ __ Fmadd(s28, s0, s1, s2);
+ __ Fmsub(s29, s0, s1, s2);
+ __ Fnmadd(s30, s0, s1, s2);
+ __ Fnmsub(s31, s0, s1, s2);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(fmadd, s28);
+ ASSERT_EQUAL_FP32(fmsub, s29);
+ ASSERT_EQUAL_FP32(fnmadd, s30);
+ ASSERT_EQUAL_FP32(fnmsub, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmadd_fmsub_float) {
+ INIT_V8();
+ // It's hard to check the result of fused operations because the only way to
+ // calculate the result is to use fma, which is what the simulator uses anyway.
+ // TODO(jbramley): Add tests to check behaviour against a hardware trace.
+
+ // Basic operation.
+ FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
+ FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
+
+ // Check the sign of exact zeroes.
+ // n m a fmadd fmsub fnmadd fnmsub
+ FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+ FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
+ FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
+ FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
+
+ // Check NaN generation.
+ FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
+ kFP32PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP32DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
+ kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
+ FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
+ kFP32DefaultNaN, // inf + (-inf * 1) = NaN
+ kFP32PositiveInfinity, // inf + ( inf * 1) = inf
+ kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
+ kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
+}
+
+
+TEST(fmadd_fmsub_double_nans) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double s1 = rawbits_to_double(0x7ff5555511111111);
+ double s2 = rawbits_to_double(0x7ff5555522222222);
+ double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+ double q1 = rawbits_to_double(0x7ffaaaaa11111111);
+ double q2 = rawbits_to_double(0x7ffaaaaa22222222);
+ double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+ ASSERT(IsSignallingNaN(s1));
+ ASSERT(IsSignallingNaN(s2));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(q1));
+ ASSERT(IsQuietNaN(q2));
+ ASSERT(IsQuietNaN(qa));
+
+ // The input NaNs after passing through ProcessNaN.
+ double s1_proc = rawbits_to_double(0x7ffd555511111111);
+ double s2_proc = rawbits_to_double(0x7ffd555522222222);
+ double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
+ double q1_proc = q1;
+ double q2_proc = q2;
+ double qa_proc = qa;
+ ASSERT(IsQuietNaN(s1_proc));
+ ASSERT(IsQuietNaN(s2_proc));
+ ASSERT(IsQuietNaN(sa_proc));
+ ASSERT(IsQuietNaN(q1_proc));
+ ASSERT(IsQuietNaN(q2_proc));
+ ASSERT(IsQuietNaN(qa_proc));
+
+ // Negated NaNs, as ARMv8 hardware would produce them.
+ double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
+ double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
+ double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
+ double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
+ ASSERT(IsQuietNaN(s1_proc_neg));
+ ASSERT(IsQuietNaN(sa_proc_neg));
+ ASSERT(IsQuietNaN(q1_proc_neg));
+ ASSERT(IsQuietNaN(qa_proc_neg));
+
+ // Quiet NaNs are propagated.
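+ // Where more than one operand is a quiet NaN, 'a' takes priority over 'n',
+ // and 'n' over 'm', as the expected values below assume.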
+ FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+ FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+ FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+ FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+
+ // Signalling NaNs are propagated, and made quiet.
+ FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+ FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+ FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+ // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+ FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+ FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
+ kFP64DefaultNaN, kFP64DefaultNaN,
+ kFP64DefaultNaN, kFP64DefaultNaN);
+}
+
+
+TEST(fmadd_fmsub_float_nans) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float s1 = rawbits_to_float(0x7f951111);
+ float s2 = rawbits_to_float(0x7f952222);
+ float sa = rawbits_to_float(0x7f95aaaa);
+ float q1 = rawbits_to_float(0x7fea1111);
+ float q2 = rawbits_to_float(0x7fea2222);
+ float qa = rawbits_to_float(0x7feaaaaa);
+ ASSERT(IsSignallingNaN(s1));
+ ASSERT(IsSignallingNaN(s2));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(q1));
+ ASSERT(IsQuietNaN(q2));
+ ASSERT(IsQuietNaN(qa));
+
+ // The input NaNs after passing through ProcessNaN.
+ float s1_proc = rawbits_to_float(0x7fd51111);
+ float s2_proc = rawbits_to_float(0x7fd52222);
+ float sa_proc = rawbits_to_float(0x7fd5aaaa);
+ float q1_proc = q1;
+ float q2_proc = q2;
+ float qa_proc = qa;
+ ASSERT(IsQuietNaN(s1_proc));
+ ASSERT(IsQuietNaN(s2_proc));
+ ASSERT(IsQuietNaN(sa_proc));
+ ASSERT(IsQuietNaN(q1_proc));
+ ASSERT(IsQuietNaN(q2_proc));
+ ASSERT(IsQuietNaN(qa_proc));
+
+ // Negated NaNs, as ARMv8 hardware would produce them.
+ float s1_proc_neg = rawbits_to_float(0xffd51111);
+ float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
+ float q1_proc_neg = rawbits_to_float(0xffea1111);
+ float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
+ ASSERT(IsQuietNaN(s1_proc_neg));
+ ASSERT(IsQuietNaN(sa_proc_neg));
+ ASSERT(IsQuietNaN(q1_proc_neg));
+ ASSERT(IsQuietNaN(qa_proc_neg));
+
+ // Quiet NaNs are propagated.
+ FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+ FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
+ FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
+ FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+ FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
+
+ // Signalling NaNs are propagated, and made quiet.
+ FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
+ FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
+ FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
+ FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+ FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
+
+ // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
+ FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+ FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
+ kFP32DefaultNaN, kFP32DefaultNaN,
+ kFP32DefaultNaN, kFP32DefaultNaN);
+}
+
+
+TEST(fdiv) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s14, -0.0f);
+ __ Fmov(s15, kFP32PositiveInfinity);
+ __ Fmov(s16, kFP32NegativeInfinity);
+ __ Fmov(s17, 3.25f);
+ __ Fmov(s18, 2.0f);
+ __ Fmov(s19, 2.0f);
+ __ Fmov(s20, -2.0f);
+
+ __ Fmov(d26, -0.0);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0.0);
+ __ Fmov(d30, -2.0);
+ __ Fmov(d31, 2.25);
+
+ __ Fdiv(s0, s17, s18);
+ __ Fdiv(s1, s18, s19);
+ __ Fdiv(s2, s14, s18);
+ __ Fdiv(s3, s18, s15);
+ __ Fdiv(s4, s18, s16);
+ __ Fdiv(s5, s15, s16);
+ __ Fdiv(s6, s14, s14);
+
+ __ Fdiv(d7, d31, d30);
+ __ Fdiv(d8, d29, d31);
+ __ Fdiv(d9, d26, d31);
+ __ Fdiv(d10, d31, d27);
+ __ Fdiv(d11, d31, d28);
+ __ Fdiv(d12, d28, d27);
+ __ Fdiv(d13, d29, d29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.625f, s0);
+ ASSERT_EQUAL_FP32(1.0f, s1);
+ ASSERT_EQUAL_FP32(-0.0f, s2);
+ ASSERT_EQUAL_FP32(0.0f, s3);
+ ASSERT_EQUAL_FP32(-0.0f, s4);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(-1.125, d7);
+ ASSERT_EQUAL_FP64(0.0, d8);
+ ASSERT_EQUAL_FP64(-0.0, d9);
+ ASSERT_EQUAL_FP64(0.0, d10);
+ ASSERT_EQUAL_FP64(-0.0, d11);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+ TEARDOWN();
+}
+
+
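+// Compute the expected result of an Fmin/Fmax operation (when
+// quiet_nan_substitute is 0.0) or an Fminnm/Fmaxnm operation (when an infinity
+// is substituted for quiet NaNs), following the NaN and signed-zero rules
+// modelled below.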
+static float MinMaxHelper(float n,
+ float m,
+ bool min,
+ float quiet_nan_substitute = 0.0) {
+ uint32_t raw_n = float_to_rawbits(n);
+ uint32_t raw_m = float_to_rawbits(m);
+
+ if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
+ // n is signalling NaN.
+ return rawbits_to_float(raw_n | kSQuietNanMask);
+ } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
+ // m is signalling NaN.
+ return rawbits_to_float(raw_m | kSQuietNanMask);
+ } else if (quiet_nan_substitute == 0.0) {
+ if (std::isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (std::isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (std::isnan(n) && !std::isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!std::isnan(n) && std::isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fminf(n, m) : fmaxf(n, m);
+}
+
+
+static double MinMaxHelper(double n,
+ double m,
+ bool min,
+ double quiet_nan_substitute = 0.0) {
+ uint64_t raw_n = double_to_rawbits(n);
+ uint64_t raw_m = double_to_rawbits(m);
+
+ if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
+ // n is signalling NaN.
+ return rawbits_to_double(raw_n | kDQuietNanMask);
+ } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
+ // m is signalling NaN.
+ return rawbits_to_double(raw_m | kDQuietNanMask);
+ } else if (quiet_nan_substitute == 0.0) {
+ if (std::isnan(n)) {
+ // n is quiet NaN.
+ return n;
+ } else if (std::isnan(m)) {
+ // m is quiet NaN.
+ return m;
+ }
+ } else {
+ // Substitute n or m if one is quiet, but not both.
+ if (std::isnan(n) && !std::isnan(m)) {
+ // n is quiet NaN: replace with substitute.
+ n = quiet_nan_substitute;
+ } else if (!std::isnan(n) && std::isnan(m)) {
+ // m is quiet NaN: replace with substitute.
+ m = quiet_nan_substitute;
+ }
+ }
+
+ if ((n == 0.0) && (m == 0.0) &&
+ (copysign(1.0, n) != copysign(1.0, m))) {
+ return min ? -0.0 : 0.0;
+ }
+
+ return min ? fmin(n, m) : fmax(n, m);
+}
+
+
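+// Assemble Fmin, Fmax, Fminnm and Fmaxnm for the double-precision operands
+// (n, m), then check the four results against the expected values.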
+static void FminFmaxDoubleHelper(double n, double m, double min, double max,
+ double minnm, double maxnm) {
+ SETUP();
+
+ START();
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmin(d28, d0, d1);
+ __ Fmax(d29, d0, d1);
+ __ Fminnm(d30, d0, d1);
+ __ Fmaxnm(d31, d0, d1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(min, d28);
+ ASSERT_EQUAL_FP64(max, d29);
+ ASSERT_EQUAL_FP64(minnm, d30);
+ ASSERT_EQUAL_FP64(maxnm, d31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_d) {
+ INIT_V8();
+ // Use non-standard NaNs to check that the payload bits are preserved.
+ double snan = rawbits_to_double(0x7ff5555512345678);
+ double qnan = rawbits_to_double(0x7ffaaaaa87654321);
+
+ double snan_processed = rawbits_to_double(0x7ffd555512345678);
+ double qnan_processed = qnan;
+
+ ASSERT(IsSignallingNaN(snan));
+ ASSERT(IsQuietNaN(qnan));
+ ASSERT(IsQuietNaN(snan_processed));
+ ASSERT(IsQuietNaN(qnan_processed));
+
+ // Bootstrap tests.
+ FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity,
+ kFP64NegativeInfinity, kFP64PositiveInfinity);
+ FminFmaxDoubleHelper(snan, 0,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(0, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(qnan, 0,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxDoubleHelper(0, qnan,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxDoubleHelper(qnan, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxDoubleHelper(snan, qnan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+
+ // Iterate over all combinations of inputs.
+ double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
+ -DBL_MAX, -DBL_MIN, -1.0, -0.0,
+ kFP64PositiveInfinity, kFP64NegativeInfinity,
+ kFP64QuietNaN, kFP64SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ double n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ double m = inputs[im];
+ FminFmaxDoubleHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP64PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP64NegativeInfinity));
+ }
+ }
+}
+
+
+static void FminFmaxFloatHelper(float n, float m, float min, float max,
+ float minnm, float maxnm) {
+ SETUP();
+
+ START();
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmin(s28, s0, s1);
+ __ Fmax(s29, s0, s1);
+ __ Fminnm(s30, s0, s1);
+ __ Fmaxnm(s31, s0, s1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(min, s28);
+ ASSERT_EQUAL_FP32(max, s29);
+ ASSERT_EQUAL_FP32(minnm, s30);
+ ASSERT_EQUAL_FP32(maxnm, s31);
+
+ TEARDOWN();
+}
+
+
+TEST(fmax_fmin_s) {
+ INIT_V8();
+ // Use non-standard NaNs to check that the payload bits are preserved.
+ float snan = rawbits_to_float(0x7f951234);
+ float qnan = rawbits_to_float(0x7fea8765);
+
+ float snan_processed = rawbits_to_float(0x7fd51234);
+ float qnan_processed = qnan;
+
+ ASSERT(IsSignallingNaN(snan));
+ ASSERT(IsQuietNaN(qnan));
+ ASSERT(IsQuietNaN(snan_processed));
+ ASSERT(IsQuietNaN(qnan_processed));
+
+ // Bootstrap tests.
+ FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
+ FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
+ FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity,
+ kFP32NegativeInfinity, kFP32PositiveInfinity);
+ FminFmaxFloatHelper(snan, 0,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(0, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(qnan, 0,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxFloatHelper(0, qnan,
+ qnan_processed, qnan_processed,
+ 0, 0);
+ FminFmaxFloatHelper(qnan, snan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+ FminFmaxFloatHelper(snan, qnan,
+ snan_processed, snan_processed,
+ snan_processed, snan_processed);
+
+ // Iterate over all combinations of inputs.
+ float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
+ -FLT_MAX, -FLT_MIN, -1.0, -0.0,
+ kFP32PositiveInfinity, kFP32NegativeInfinity,
+ kFP32QuietNaN, kFP32SignallingNaN };
+
+ const int count = sizeof(inputs) / sizeof(inputs[0]);
+
+ for (int in = 0; in < count; in++) {
+ float n = inputs[in];
+ for (int im = 0; im < count; im++) {
+ float m = inputs[im];
+ FminFmaxFloatHelper(n, m,
+ MinMaxHelper(n, m, true),
+ MinMaxHelper(n, m, false),
+ MinMaxHelper(n, m, true, kFP32PositiveInfinity),
+ MinMaxHelper(n, m, false, kFP32NegativeInfinity));
+ }
+ }
+}
+
+
+TEST(fccmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 0.5);
+ __ Fmov(d18, -0.5);
+ __ Fmov(d19, -1.0);
+ __ Mov(x20, 0);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, NoFlag, eq);
+ __ Mrs(x0, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s16, VFlag, ne);
+ __ Mrs(x1, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CFlag, ge);
+ __ Mrs(x2, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(s16, s17, CVFlag, lt);
+ __ Mrs(x3, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZFlag, le);
+ __ Mrs(x4, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d18, ZVFlag, gt);
+ __ Mrs(x5, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, ZCVFlag, ls);
+ __ Mrs(x6, NZCV);
+
+ __ Cmp(x20, 0);
+ __ Fccmp(d18, d19, NFlag, hi);
+ __ Mrs(x7, NZCV);
+
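+ // The al and nv conditions can only be encoded through the raw assembler;
+ // the macro assembler is not expected to accept them for Fccmp.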
+ __ fccmp(s16, s16, NFlag, al);
+ __ Mrs(x8, NZCV);
+
+ __ fccmp(d18, d18, NFlag, nv);
+ __ Mrs(x9, NZCV);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(VFlag, w1);
+ ASSERT_EQUAL_32(NFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(ZCFlag, w4);
+ ASSERT_EQUAL_32(ZVFlag, w5);
+ ASSERT_EQUAL_32(CFlag, w6);
+ ASSERT_EQUAL_32(NFlag, w7);
+ ASSERT_EQUAL_32(ZCFlag, w8);
+ ASSERT_EQUAL_32(ZCFlag, w9);
+
+ TEARDOWN();
+}
+
+
+TEST(fcmp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // Some of these tests require a floating-point scratch register assigned to
+ // the macro assembler, but most do not.
+ {
+ // We're going to mess around with the available scratch registers in this
+ // test. A UseScratchRegisterScope will make sure that they are restored to
+ // the default values once we're finished.
+ UseScratchRegisterScope temps(&masm);
+ masm.FPTmpList()->set_list(0);
+
+ __ Fmov(s8, 0.0);
+ __ Fmov(s9, 0.5);
+ __ Mov(w18, 0x7f800001); // Single precision NaN.
+ __ Fmov(s18, w18);
+
+ __ Fcmp(s8, s8);
+ __ Mrs(x0, NZCV);
+ __ Fcmp(s8, s9);
+ __ Mrs(x1, NZCV);
+ __ Fcmp(s9, s8);
+ __ Mrs(x2, NZCV);
+ __ Fcmp(s8, s18);
+ __ Mrs(x3, NZCV);
+ __ Fcmp(s18, s18);
+ __ Mrs(x4, NZCV);
+ __ Fcmp(s8, 0.0);
+ __ Mrs(x5, NZCV);
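+ // Fcmp against a non-zero immediate needs a floating-point scratch register,
+ // so make d0 available just for these comparisons.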
+ masm.FPTmpList()->set_list(d0.Bit());
+ __ Fcmp(s8, 255.0);
+ masm.FPTmpList()->set_list(0);
+ __ Mrs(x6, NZCV);
+
+ __ Fmov(d19, 0.0);
+ __ Fmov(d20, 0.5);
+ __ Mov(x21, 0x7ff0000000000001UL); // Double precision NaN.
+ __ Fmov(d21, x21);
+
+ __ Fcmp(d19, d19);
+ __ Mrs(x10, NZCV);
+ __ Fcmp(d19, d20);
+ __ Mrs(x11, NZCV);
+ __ Fcmp(d20, d19);
+ __ Mrs(x12, NZCV);
+ __ Fcmp(d19, d21);
+ __ Mrs(x13, NZCV);
+ __ Fcmp(d21, d21);
+ __ Mrs(x14, NZCV);
+ __ Fcmp(d19, 0.0);
+ __ Mrs(x15, NZCV);
+ masm.FPTmpList()->set_list(d0.Bit());
+ __ Fcmp(d19, 12.3456);
+ masm.FPTmpList()->set_list(0);
+ __ Mrs(x16, NZCV);
+ }
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(ZCFlag, w0);
+ ASSERT_EQUAL_32(NFlag, w1);
+ ASSERT_EQUAL_32(CFlag, w2);
+ ASSERT_EQUAL_32(CVFlag, w3);
+ ASSERT_EQUAL_32(CVFlag, w4);
+ ASSERT_EQUAL_32(ZCFlag, w5);
+ ASSERT_EQUAL_32(NFlag, w6);
+ ASSERT_EQUAL_32(ZCFlag, w10);
+ ASSERT_EQUAL_32(NFlag, w11);
+ ASSERT_EQUAL_32(CFlag, w12);
+ ASSERT_EQUAL_32(CVFlag, w13);
+ ASSERT_EQUAL_32(CVFlag, w14);
+ ASSERT_EQUAL_32(ZCFlag, w15);
+ ASSERT_EQUAL_32(NFlag, w16);
+
+ TEARDOWN();
+}
+
+
+TEST(fcsel) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(x16, 0);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 2.0);
+ __ Fmov(d18, 3.0);
+ __ Fmov(d19, 4.0);
+
+ __ Cmp(x16, 0);
+ __ Fcsel(s0, s16, s17, eq);
+ __ Fcsel(s1, s16, s17, ne);
+ __ Fcsel(d2, d18, d19, eq);
+ __ Fcsel(d3, d18, d19, ne);
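+ // As with fccmp, use the raw assembler for the al and nv conditions.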
+ __ fcsel(s4, s16, s17, al);
+ __ fcsel(d5, d18, d19, nv);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(2.0, s1);
+ ASSERT_EQUAL_FP64(3.0, d2);
+ ASSERT_EQUAL_FP64(4.0, d3);
+ ASSERT_EQUAL_FP32(1.0, s4);
+ ASSERT_EQUAL_FP64(3.0, d5);
+
+ TEARDOWN();
+}
+
+
+TEST(fneg) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 0.0);
+ __ Fmov(s18, kFP32PositiveInfinity);
+ __ Fmov(d19, 1.0);
+ __ Fmov(d20, 0.0);
+ __ Fmov(d21, kFP64PositiveInfinity);
+
+ __ Fneg(s0, s16);
+ __ Fneg(s1, s0);
+ __ Fneg(s2, s17);
+ __ Fneg(s3, s2);
+ __ Fneg(s4, s18);
+ __ Fneg(s5, s4);
+ __ Fneg(d6, d19);
+ __ Fneg(d7, d6);
+ __ Fneg(d8, d20);
+ __ Fneg(d9, d8);
+ __ Fneg(d10, d21);
+ __ Fneg(d11, d10);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(-1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(-0.0, s2);
+ ASSERT_EQUAL_FP32(0.0, s3);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP64(-1.0, d6);
+ ASSERT_EQUAL_FP64(1.0, d7);
+ ASSERT_EQUAL_FP64(-0.0, d8);
+ ASSERT_EQUAL_FP64(0.0, d9);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+
+ TEARDOWN();
+}
+
+
+TEST(fabs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, -1.0);
+ __ Fmov(s17, -0.0);
+ __ Fmov(s18, kFP32NegativeInfinity);
+ __ Fmov(d19, -1.0);
+ __ Fmov(d20, -0.0);
+ __ Fmov(d21, kFP64NegativeInfinity);
+
+ __ Fabs(s0, s16);
+ __ Fabs(s1, s0);
+ __ Fabs(s2, s17);
+ __ Fabs(s3, s18);
+ __ Fabs(d4, d19);
+ __ Fabs(d5, d4);
+ __ Fabs(d6, d20);
+ __ Fabs(d7, d21);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.0, s2);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
+ ASSERT_EQUAL_FP64(1.0, d4);
+ ASSERT_EQUAL_FP64(1.0, d5);
+ ASSERT_EQUAL_FP64(0.0, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(fsqrt) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 0.0);
+ __ Fmov(s17, 1.0);
+ __ Fmov(s18, 0.25);
+ __ Fmov(s19, 65536.0);
+ __ Fmov(s20, -0.0);
+ __ Fmov(s21, kFP32PositiveInfinity);
+ __ Fmov(s22, -1.0);
+ __ Fmov(d23, 0.0);
+ __ Fmov(d24, 1.0);
+ __ Fmov(d25, 0.25);
+ __ Fmov(d26, 4294967296.0);
+ __ Fmov(d27, -0.0);
+ __ Fmov(d28, kFP64PositiveInfinity);
+ __ Fmov(d29, -1.0);
+
+ __ Fsqrt(s0, s16);
+ __ Fsqrt(s1, s17);
+ __ Fsqrt(s2, s18);
+ __ Fsqrt(s3, s19);
+ __ Fsqrt(s4, s20);
+ __ Fsqrt(s5, s21);
+ __ Fsqrt(s6, s22);
+ __ Fsqrt(d7, d23);
+ __ Fsqrt(d8, d24);
+ __ Fsqrt(d9, d25);
+ __ Fsqrt(d10, d26);
+ __ Fsqrt(d11, d27);
+ __ Fsqrt(d12, d28);
+ __ Fsqrt(d13, d29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(0.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(0.5, s2);
+ ASSERT_EQUAL_FP32(256.0, s3);
+ ASSERT_EQUAL_FP32(-0.0, s4);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
+ ASSERT_EQUAL_FP64(0.0, d7);
+ ASSERT_EQUAL_FP64(1.0, d8);
+ ASSERT_EQUAL_FP64(0.5, d9);
+ ASSERT_EQUAL_FP64(65536.0, d10);
+ ASSERT_EQUAL_FP64(-0.0, d11);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+
+ TEARDOWN();
+}
+
+
+TEST(frinta) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frinta(s0, s16);
+ __ Frinta(s1, s17);
+ __ Frinta(s2, s18);
+ __ Frinta(s3, s19);
+ __ Frinta(s4, s20);
+ __ Frinta(s5, s21);
+ __ Frinta(s6, s22);
+ __ Frinta(s7, s23);
+ __ Frinta(s8, s24);
+ __ Frinta(s9, s25);
+ __ Frinta(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frinta(d11, d16);
+ __ Frinta(d12, d17);
+ __ Frinta(d13, d18);
+ __ Frinta(d14, d19);
+ __ Frinta(d15, d20);
+ __ Frinta(d16, d21);
+ __ Frinta(d17, d22);
+ __ Frinta(d18, d23);
+ __ Frinta(d19, d24);
+ __ Frinta(d20, d25);
+ __ Frinta(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(3.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-3.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(3.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-3.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintn) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintn(s0, s16);
+ __ Frintn(s1, s17);
+ __ Frintn(s2, s18);
+ __ Frintn(s3, s19);
+ __ Frintn(s4, s20);
+ __ Frintn(s5, s21);
+ __ Frintn(s6, s22);
+ __ Frintn(s7, s23);
+ __ Frintn(s8, s24);
+ __ Frintn(s9, s25);
+ __ Frintn(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintn(d11, d16);
+ __ Frintn(d12, d17);
+ __ Frintn(d13, d18);
+ __ Frintn(d14, d19);
+ __ Frintn(d15, d20);
+ __ Frintn(d16, d21);
+ __ Frintn(d17, d22);
+ __ Frintn(d18, d23);
+ __ Frintn(d19, d24);
+ __ Frintn(d20, d25);
+ __ Frintn(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(2.0, s2);
+ ASSERT_EQUAL_FP32(2.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-2.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(2.0, d13);
+ ASSERT_EQUAL_FP64(2.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-2.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(frintz) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+
+ __ Frintz(s0, s16);
+ __ Frintz(s1, s17);
+ __ Frintz(s2, s18);
+ __ Frintz(s3, s19);
+ __ Frintz(s4, s20);
+ __ Frintz(s5, s21);
+ __ Frintz(s6, s22);
+ __ Frintz(s7, s23);
+ __ Frintz(s8, s24);
+ __ Frintz(s9, s25);
+ __ Frintz(s10, s26);
+
+ __ Fmov(d16, 1.0);
+ __ Fmov(d17, 1.1);
+ __ Fmov(d18, 1.5);
+ __ Fmov(d19, 1.9);
+ __ Fmov(d20, 2.5);
+ __ Fmov(d21, -1.5);
+ __ Fmov(d22, -2.5);
+ __ Fmov(d23, kFP64PositiveInfinity);
+ __ Fmov(d24, kFP64NegativeInfinity);
+ __ Fmov(d25, 0.0);
+ __ Fmov(d26, -0.0);
+
+ __ Frintz(d11, d16);
+ __ Frintz(d12, d17);
+ __ Frintz(d13, d18);
+ __ Frintz(d14, d19);
+ __ Frintz(d15, d20);
+ __ Frintz(d16, d21);
+ __ Frintz(d17, d22);
+ __ Frintz(d18, d23);
+ __ Frintz(d19, d24);
+ __ Frintz(d20, d25);
+ __ Frintz(d21, d26);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP32(1.0, s0);
+ ASSERT_EQUAL_FP32(1.0, s1);
+ ASSERT_EQUAL_FP32(1.0, s2);
+ ASSERT_EQUAL_FP32(1.0, s3);
+ ASSERT_EQUAL_FP32(2.0, s4);
+ ASSERT_EQUAL_FP32(-1.0, s5);
+ ASSERT_EQUAL_FP32(-2.0, s6);
+ ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+ ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+ ASSERT_EQUAL_FP32(0.0, s9);
+ ASSERT_EQUAL_FP32(-0.0, s10);
+ ASSERT_EQUAL_FP64(1.0, d11);
+ ASSERT_EQUAL_FP64(1.0, d12);
+ ASSERT_EQUAL_FP64(1.0, d13);
+ ASSERT_EQUAL_FP64(1.0, d14);
+ ASSERT_EQUAL_FP64(2.0, d15);
+ ASSERT_EQUAL_FP64(-1.0, d16);
+ ASSERT_EQUAL_FP64(-2.0, d17);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
+ ASSERT_EQUAL_FP64(0.0, d20);
+ ASSERT_EQUAL_FP64(-0.0, d21);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_ds) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, 1.9);
+ __ Fmov(s20, 2.5);
+ __ Fmov(s21, -1.5);
+ __ Fmov(s22, -2.5);
+ __ Fmov(s23, kFP32PositiveInfinity);
+ __ Fmov(s24, kFP32NegativeInfinity);
+ __ Fmov(s25, 0.0);
+ __ Fmov(s26, -0.0);
+ __ Fmov(s27, FLT_MAX);
+ __ Fmov(s28, FLT_MIN);
+ __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
+ __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
+
+ __ Fcvt(d0, s16);
+ __ Fcvt(d1, s17);
+ __ Fcvt(d2, s18);
+ __ Fcvt(d3, s19);
+ __ Fcvt(d4, s20);
+ __ Fcvt(d5, s21);
+ __ Fcvt(d6, s22);
+ __ Fcvt(d7, s23);
+ __ Fcvt(d8, s24);
+ __ Fcvt(d9, s25);
+ __ Fcvt(d10, s26);
+ __ Fcvt(d11, s27);
+ __ Fcvt(d12, s28);
+ __ Fcvt(d13, s29);
+ __ Fcvt(d14, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_FP64(1.0f, d0);
+ ASSERT_EQUAL_FP64(1.1f, d1);
+ ASSERT_EQUAL_FP64(1.5f, d2);
+ ASSERT_EQUAL_FP64(1.9f, d3);
+ ASSERT_EQUAL_FP64(2.5f, d4);
+ ASSERT_EQUAL_FP64(-1.5f, d5);
+ ASSERT_EQUAL_FP64(-2.5f, d6);
+ ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+ ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
+ ASSERT_EQUAL_FP64(0.0f, d9);
+ ASSERT_EQUAL_FP64(-0.0f, d10);
+ ASSERT_EQUAL_FP64(FLT_MAX, d11);
+ ASSERT_EQUAL_FP64(FLT_MIN, d12);
+
+ // Check that the NaN payload is preserved according to ARM64 conversion
+ // rules:
+ // - The sign bit is preserved.
+ // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
+ // - The remaining mantissa bits are copied until they run out.
+ // - The low-order bits that haven't already been assigned are set to 0.
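+ // For example, the quiet NaN 0x7fc12345 (sign 0, exponent 0xff, mantissa
+ // 0x412345) maps to 0x7ff82468a0000000: the 23 mantissa bits are copied into
+ // the top of the double's mantissa (a shift left by 29) and the quiet bit
+ // remains set.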
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
+ ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvt_sd) {
+ INIT_V8();
+ // There are a huge number of corner-cases to check, so this test iterates
+ // through a list. The list is then negated and checked again (since the sign
+ // is irrelevant in ties-to-even rounding), so the list shouldn't include any
+ // negative values.
+ //
+ // Note that this test only checks ties-to-even rounding, because that is all
+ // that the simulator supports.
+ struct {double in; float expected;} test[] = {
+ // Check some simple conversions.
+ {0.0, 0.0f},
+ {1.0, 1.0f},
+ {1.5, 1.5f},
+ {2.0, 2.0f},
+ {FLT_MAX, FLT_MAX},
+ // - The smallest normalized float.
+ {pow(2.0, -126), powf(2, -126)},
+ // - Normal floats that need (ties-to-even) rounding.
+ // For normalized numbers:
+ // bit 29 (0x0000000020000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
+ {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
+ {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
+ {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
+ {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
+ // - A mantissa that overflows into the exponent during rounding.
+ {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
+ // - The largest double that rounds to a normal float.
+ {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
+
+ // Doubles that are too big for a float.
+ {kFP64PositiveInfinity, kFP32PositiveInfinity},
+ {DBL_MAX, kFP32PositiveInfinity},
+ // - The smallest exponent that's too big for a float.
+ {pow(2.0, 128), kFP32PositiveInfinity},
+ // - This exponent is in range, but the value rounds to infinity.
+ {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
+
+ // Doubles that are too small for a float.
+ // - The smallest (subnormal) double.
+ {DBL_MIN, 0.0},
+ // - The largest double which is too small for a subnormal float.
+ {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
+
+ // Normal doubles that become subnormal floats.
+ // - The largest subnormal float.
+ {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
+ // - The smallest subnormal float.
+ {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
+ // - Subnormal floats that need (ties-to-even) rounding.
+ // For these subnormals:
+ // bit 34 (0x0000000400000000) is the lowest-order bit which will
+ // fit in the float's mantissa.
+ {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
+ {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
+ {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
+ {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
+ {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
+ // - The smallest double which rounds up to become a subnormal float.
+ {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
+
+ // Check NaN payload preservation.
+ {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
+ // - Signalling NaNs become quiet NaNs.
+ {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
+ {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
+ };
+ int count = sizeof(test) / sizeof(test[0]);
+
+ for (int i = 0; i < count; i++) {
+ double in = test[i].in;
+ float expected = test[i].expected;
+
+ // We only expect positive input.
+ ASSERT(std::signbit(in) == 0);
+ ASSERT(std::signbit(expected) == 0);
+
+ SETUP();
+ START();
+
+ __ Fmov(d10, in);
+ __ Fcvt(s20, d10);
+
+ __ Fmov(d11, -in);
+ __ Fcvt(s21, d11);
+
+ END();
+ RUN();
+ ASSERT_EQUAL_FP32(expected, s20);
+ ASSERT_EQUAL_FP32(-expected, s21);
+ TEARDOWN();
+ }
+}
+
+
+TEST(fcvtas) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtas(w0, s0);
+ __ Fcvtas(w1, s1);
+ __ Fcvtas(w2, s2);
+ __ Fcvtas(w3, s3);
+ __ Fcvtas(w4, s4);
+ __ Fcvtas(w5, s5);
+ __ Fcvtas(w6, s6);
+ __ Fcvtas(w7, s7);
+ __ Fcvtas(w8, d8);
+ __ Fcvtas(w9, d9);
+ __ Fcvtas(w10, d10);
+ __ Fcvtas(w11, d11);
+ __ Fcvtas(w12, d12);
+ __ Fcvtas(w13, d13);
+ __ Fcvtas(w14, d14);
+ __ Fcvtas(w15, d15);
+ __ Fcvtas(x17, s17);
+ __ Fcvtas(x18, s18);
+ __ Fcvtas(x19, s19);
+ __ Fcvtas(x20, s20);
+ __ Fcvtas(x21, s21);
+ __ Fcvtas(x22, s22);
+ __ Fcvtas(x23, s23);
+ __ Fcvtas(x24, d24);
+ __ Fcvtas(x25, d25);
+ __ Fcvtas(x26, d26);
+ __ Fcvtas(x27, d27);
+ __ Fcvtas(x28, d28);
+ __ Fcvtas(x29, d29);
+ __ Fcvtas(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0xfffffffd, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0xfffffffd, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtau) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 2.5);
+ __ Fmov(s3, -2.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 2.5);
+ __ Fmov(d11, -2.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 2.5);
+ __ Fmov(s19, -2.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 2.5);
+ __ Fmov(d26, -2.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtau(w0, s0);
+ __ Fcvtau(w1, s1);
+ __ Fcvtau(w2, s2);
+ __ Fcvtau(w3, s3);
+ __ Fcvtau(w4, s4);
+ __ Fcvtau(w5, s5);
+ __ Fcvtau(w6, s6);
+ __ Fcvtau(w8, d8);
+ __ Fcvtau(w9, d9);
+ __ Fcvtau(w10, d10);
+ __ Fcvtau(w11, d11);
+ __ Fcvtau(w12, d12);
+ __ Fcvtau(w13, d13);
+ __ Fcvtau(w14, d14);
+ __ Fcvtau(w15, d15);
+ __ Fcvtau(x16, s16);
+ __ Fcvtau(x17, s17);
+ __ Fcvtau(x18, s18);
+ __ Fcvtau(x19, s19);
+ __ Fcvtau(x20, s20);
+ __ Fcvtau(x21, s21);
+ __ Fcvtau(x22, s22);
+ __ Fcvtau(x24, d24);
+ __ Fcvtau(x25, d25);
+ __ Fcvtau(x26, d26);
+ __ Fcvtau(x27, d27);
+ __ Fcvtau(x28, d28);
+ __ Fcvtau(x29, d29);
+ __ Fcvtau(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(3, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(3, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(3, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(3, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtms) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtms(w0, s0);
+ __ Fcvtms(w1, s1);
+ __ Fcvtms(w2, s2);
+ __ Fcvtms(w3, s3);
+ __ Fcvtms(w4, s4);
+ __ Fcvtms(w5, s5);
+ __ Fcvtms(w6, s6);
+ __ Fcvtms(w7, s7);
+ __ Fcvtms(w8, d8);
+ __ Fcvtms(w9, d9);
+ __ Fcvtms(w10, d10);
+ __ Fcvtms(w11, d11);
+ __ Fcvtms(w12, d12);
+ __ Fcvtms(w13, d13);
+ __ Fcvtms(w14, d14);
+ __ Fcvtms(w15, d15);
+ __ Fcvtms(x17, s17);
+ __ Fcvtms(x18, s18);
+ __ Fcvtms(x19, s19);
+ __ Fcvtms(x20, s20);
+ __ Fcvtms(x21, s21);
+ __ Fcvtms(x22, s22);
+ __ Fcvtms(x23, s23);
+ __ Fcvtms(x24, d24);
+ __ Fcvtms(x25, d25);
+ __ Fcvtms(x26, d26);
+ __ Fcvtms(x27, d27);
+ __ Fcvtms(x28, d28);
+ __ Fcvtms(x29, d29);
+ __ Fcvtms(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtmu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtmu(w0, s0);
+ __ Fcvtmu(w1, s1);
+ __ Fcvtmu(w2, s2);
+ __ Fcvtmu(w3, s3);
+ __ Fcvtmu(w4, s4);
+ __ Fcvtmu(w5, s5);
+ __ Fcvtmu(w6, s6);
+ __ Fcvtmu(w7, s7);
+ __ Fcvtmu(w8, d8);
+ __ Fcvtmu(w9, d9);
+ __ Fcvtmu(w10, d10);
+ __ Fcvtmu(w11, d11);
+ __ Fcvtmu(w12, d12);
+ __ Fcvtmu(w13, d13);
+ __ Fcvtmu(w14, d14);
+ __ Fcvtmu(x17, s17);
+ __ Fcvtmu(x18, s18);
+ __ Fcvtmu(x19, s19);
+ __ Fcvtmu(x20, s20);
+ __ Fcvtmu(x21, s21);
+ __ Fcvtmu(x22, s22);
+ __ Fcvtmu(x23, s23);
+ __ Fcvtmu(x24, d24);
+ __ Fcvtmu(x25, d25);
+ __ Fcvtmu(x26, d26);
+ __ Fcvtmu(x27, d27);
+ __ Fcvtmu(x28, d28);
+ __ Fcvtmu(x29, d29);
+ __ Fcvtmu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtns) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtns(w0, s0);
+ __ Fcvtns(w1, s1);
+ __ Fcvtns(w2, s2);
+ __ Fcvtns(w3, s3);
+ __ Fcvtns(w4, s4);
+ __ Fcvtns(w5, s5);
+ __ Fcvtns(w6, s6);
+ __ Fcvtns(w7, s7);
+ __ Fcvtns(w8, d8);
+ __ Fcvtns(w9, d9);
+ __ Fcvtns(w10, d10);
+ __ Fcvtns(w11, d11);
+ __ Fcvtns(w12, d12);
+ __ Fcvtns(w13, d13);
+ __ Fcvtns(w14, d14);
+ __ Fcvtns(w15, d15);
+ __ Fcvtns(x17, s17);
+ __ Fcvtns(x18, s18);
+ __ Fcvtns(x19, s19);
+ __ Fcvtns(x20, s20);
+ __ Fcvtns(x21, s21);
+ __ Fcvtns(x22, s22);
+ __ Fcvtns(x23, s23);
+ __ Fcvtns(x24, d24);
+ __ Fcvtns(x25, d25);
+ __ Fcvtns(x26, d26);
+ __ Fcvtns(x27, d27);
+// __ Fcvtns(x28, d28);
+ __ Fcvtns(x29, d29);
+ __ Fcvtns(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0xfffffffe, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0xfffffffe, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtnu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, 0xfffffffe);
+ __ Fmov(s16, 1.0);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0xffffff0000000000UL); // Largest float < UINT64_MAX.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0xfffffffffffff800UL); // Largest double < UINT64_MAX.
+ __ Fmov(s30, 0x100000000UL);
+
+ __ Fcvtnu(w0, s0);
+ __ Fcvtnu(w1, s1);
+ __ Fcvtnu(w2, s2);
+ __ Fcvtnu(w3, s3);
+ __ Fcvtnu(w4, s4);
+ __ Fcvtnu(w5, s5);
+ __ Fcvtnu(w6, s6);
+ __ Fcvtnu(w8, d8);
+ __ Fcvtnu(w9, d9);
+ __ Fcvtnu(w10, d10);
+ __ Fcvtnu(w11, d11);
+ __ Fcvtnu(w12, d12);
+ __ Fcvtnu(w13, d13);
+ __ Fcvtnu(w14, d14);
+ __ Fcvtnu(w15, d15);
+ __ Fcvtnu(x16, s16);
+ __ Fcvtnu(x17, s17);
+ __ Fcvtnu(x18, s18);
+ __ Fcvtnu(x19, s19);
+ __ Fcvtnu(x20, s20);
+ __ Fcvtnu(x21, s21);
+ __ Fcvtnu(x22, s22);
+ __ Fcvtnu(x24, d24);
+ __ Fcvtnu(x25, d25);
+ __ Fcvtnu(x26, d26);
+ __ Fcvtnu(x27, d27);
+// __ Fcvtnu(x28, d28);
+ __ Fcvtnu(x29, d29);
+ __ Fcvtnu(w30, s30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(2, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0xffffff00, x6);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(2, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0xfffffffe, x14);
+ ASSERT_EQUAL_64(1, x16);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(2, x18);
+ ASSERT_EQUAL_64(0, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0, x21);
+ ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(2, x25);
+ ASSERT_EQUAL_64(0, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+// ASSERT_EQUAL_64(0, x28);
+ ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
+ ASSERT_EQUAL_64(0xffffffff, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtzs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzs(w0, s0);
+ __ Fcvtzs(w1, s1);
+ __ Fcvtzs(w2, s2);
+ __ Fcvtzs(w3, s3);
+ __ Fcvtzs(w4, s4);
+ __ Fcvtzs(w5, s5);
+ __ Fcvtzs(w6, s6);
+ __ Fcvtzs(w7, s7);
+ __ Fcvtzs(w8, d8);
+ __ Fcvtzs(w9, d9);
+ __ Fcvtzs(w10, d10);
+ __ Fcvtzs(w11, d11);
+ __ Fcvtzs(w12, d12);
+ __ Fcvtzs(w13, d13);
+ __ Fcvtzs(w14, d14);
+ __ Fcvtzs(w15, d15);
+ __ Fcvtzs(x17, s17);
+ __ Fcvtzs(x18, s18);
+ __ Fcvtzs(x19, s19);
+ __ Fcvtzs(x20, s20);
+ __ Fcvtzs(x21, s21);
+ __ Fcvtzs(x22, s22);
+ __ Fcvtzs(x23, s23);
+ __ Fcvtzs(x24, d24);
+ __ Fcvtzs(x25, d25);
+ __ Fcvtzs(x26, d26);
+ __ Fcvtzs(x27, d27);
+ __ Fcvtzs(x28, d28);
+ __ Fcvtzs(x29, d29);
+ __ Fcvtzs(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0xffffffff, x3);
+ ASSERT_EQUAL_64(0x7fffffff, x4);
+ ASSERT_EQUAL_64(0x80000000, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0x80000080, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0xffffffff, x11);
+ ASSERT_EQUAL_64(0x7fffffff, x12);
+ ASSERT_EQUAL_64(0x80000000, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(0x80000001, x15);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x8000008000000000UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
+ ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x8000000000000000UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+
+ TEARDOWN();
+}
+
+
+TEST(fcvtzu) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Fmov(s0, 1.0);
+ __ Fmov(s1, 1.1);
+ __ Fmov(s2, 1.5);
+ __ Fmov(s3, -1.5);
+ __ Fmov(s4, kFP32PositiveInfinity);
+ __ Fmov(s5, kFP32NegativeInfinity);
+ __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
+ __ Fneg(s7, s6); // Smallest float > INT32_MIN.
+ __ Fmov(d8, 1.0);
+ __ Fmov(d9, 1.1);
+ __ Fmov(d10, 1.5);
+ __ Fmov(d11, -1.5);
+ __ Fmov(d12, kFP64PositiveInfinity);
+ __ Fmov(d13, kFP64NegativeInfinity);
+ __ Fmov(d14, kWMaxInt - 1);
+ __ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s17, 1.1);
+ __ Fmov(s18, 1.5);
+ __ Fmov(s19, -1.5);
+ __ Fmov(s20, kFP32PositiveInfinity);
+ __ Fmov(s21, kFP32NegativeInfinity);
+ __ Fmov(s22, 0x7fffff8000000000UL); // Largest float < INT64_MAX.
+ __ Fneg(s23, s22); // Smallest float > INT64_MIN.
+ __ Fmov(d24, 1.1);
+ __ Fmov(d25, 1.5);
+ __ Fmov(d26, -1.5);
+ __ Fmov(d27, kFP64PositiveInfinity);
+ __ Fmov(d28, kFP64NegativeInfinity);
+ __ Fmov(d29, 0x7ffffffffffffc00UL); // Largest double < INT64_MAX.
+ __ Fneg(d30, d29); // Smallest double > INT64_MIN.
+
+ __ Fcvtzu(w0, s0);
+ __ Fcvtzu(w1, s1);
+ __ Fcvtzu(w2, s2);
+ __ Fcvtzu(w3, s3);
+ __ Fcvtzu(w4, s4);
+ __ Fcvtzu(w5, s5);
+ __ Fcvtzu(w6, s6);
+ __ Fcvtzu(w7, s7);
+ __ Fcvtzu(w8, d8);
+ __ Fcvtzu(w9, d9);
+ __ Fcvtzu(w10, d10);
+ __ Fcvtzu(w11, d11);
+ __ Fcvtzu(w12, d12);
+ __ Fcvtzu(w13, d13);
+ __ Fcvtzu(w14, d14);
+ __ Fcvtzu(x17, s17);
+ __ Fcvtzu(x18, s18);
+ __ Fcvtzu(x19, s19);
+ __ Fcvtzu(x20, s20);
+ __ Fcvtzu(x21, s21);
+ __ Fcvtzu(x22, s22);
+ __ Fcvtzu(x23, s23);
+ __ Fcvtzu(x24, d24);
+ __ Fcvtzu(x25, d25);
+ __ Fcvtzu(x26, d26);
+ __ Fcvtzu(x27, d27);
+ __ Fcvtzu(x28, d28);
+ __ Fcvtzu(x29, d29);
+ __ Fcvtzu(x30, d30);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+ ASSERT_EQUAL_64(1, x1);
+ ASSERT_EQUAL_64(1, x2);
+ ASSERT_EQUAL_64(0, x3);
+ ASSERT_EQUAL_64(0xffffffff, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0x7fffff80, x6);
+ ASSERT_EQUAL_64(0, x7);
+ ASSERT_EQUAL_64(1, x8);
+ ASSERT_EQUAL_64(1, x9);
+ ASSERT_EQUAL_64(1, x10);
+ ASSERT_EQUAL_64(0, x11);
+ ASSERT_EQUAL_64(0xffffffff, x12);
+ ASSERT_EQUAL_64(0, x13);
+ ASSERT_EQUAL_64(0x7ffffffe, x14);
+ ASSERT_EQUAL_64(1, x17);
+ ASSERT_EQUAL_64(1, x18);
+ ASSERT_EQUAL_64(0x0UL, x19);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
+ ASSERT_EQUAL_64(0x0UL, x21);
+ ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
+ ASSERT_EQUAL_64(0x0UL, x23);
+ ASSERT_EQUAL_64(1, x24);
+ ASSERT_EQUAL_64(1, x25);
+ ASSERT_EQUAL_64(0x0UL, x26);
+ ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
+ ASSERT_EQUAL_64(0x0UL, x28);
+ ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
+ ASSERT_EQUAL_64(0x0UL, x30);
+
+ TEARDOWN();
+}
+
+
+// Test that scvtf and ucvtf can convert the 64-bit input into the expected
+// value. All possible values of 'fbits' are tested. The expected value is
+// modified accordingly in each case.
+//
+// The expected value is specified as the bit encoding of the expected double
+// produced by scvtf (expected_scvtf_bits) as well as ucvtf
+// (expected_ucvtf_bits).
+//
+// Where the input value is representable by int32_t or uint32_t, conversions
+// from W registers will also be tested.
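+//
+// For example, with an input of 1 and fbits == 4, scvtf treats the input as a
+// fixed-point value with four fractional bits, so the expected result is
+// 1 / 2^4 = 0.0625; the checks below simply divide the fbits == 0 expectation
+// by pow(2.0, fbits).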
+static void TestUScvtfHelper(uint64_t in,
+ uint64_t expected_scvtf_bits,
+ uint64_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ int32_t s32 = s64 & 0x7fffffff;
+
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ double results_scvtf_x[65];
+ double results_ucvtf_x[65];
+ double results_scvtf_w[33];
+ double results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSizeInBits);
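+  // After the Bfi, x11 holds 0x55555555:<low word of x10>, so a conversion
+  // that accidentally reads the upper half of x11 will produce a visibly
+  // wrong result.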
+
+ // Test integer conversions.
+ __ Scvtf(d0, x10);
+ __ Ucvtf(d1, x10);
+ __ Scvtf(d2, w11);
+ __ Ucvtf(d3, w11);
+ __ Str(d0, MemOperand(x0));
+ __ Str(d1, MemOperand(x1));
+ __ Str(d2, MemOperand(x2));
+ __ Str(d3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Scvtf(d2, w11, fbits);
+ __ Ucvtf(d3, w11, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSize));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSize));
+ __ Str(d2, MemOperand(x2, fbits * kDRegSize));
+ __ Str(d3, MemOperand(x3, fbits * kDRegSize));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(d0, x10, fbits);
+ __ Ucvtf(d1, x10, fbits);
+ __ Str(d0, MemOperand(x0, fbits * kDRegSize));
+ __ Str(d1, MemOperand(x1, fbits * kDRegSize));
+ }
+
+ END();
+ RUN();
+
+ // Check the results.
+ double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
+ double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
+
+ for (int fbits = 0; fbits <= 32; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
+ if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
+ }
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
+ double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
+ ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(scvtf_ucvtf_double) {
+ INIT_V8();
+ // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf should
+ // produce the same result.
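+  // The expected values are given as raw IEEE-754 bit patterns; for example,
+  // 0x3ff0000000000000 encodes 1.0 and 0x41d0000000000000 encodes 2^30.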
+ TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
+ TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
+ TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
+ TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
+ TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
+ // Test mantissa extremities.
+ TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
+ // The largest int32_t that fits in a double.
+ TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
+ // Values that would be negative if treated as an int32_t.
+ TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
+ TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
+ TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
+ // The largest int64_t that fits in a double.
+ TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
+ // Check for bit pattern reproduction.
+ TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
+ TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
+
+ // Simple conversions of negative int64_t values. These require no rounding,
+ // and the results should not depend on the rounding mode.
+ TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
+ TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
+ TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
+
+ // Conversions which require rounding.
+ TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
+ TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
+ TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
+ TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
+ TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
+ // Check rounding of negative int64_t values (and large uint64_t values).
+ TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
+ TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
+ TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
+ // Round up to produce a result that's too big for the input to represent.
+ TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
+ TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
+ TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
+}
+
+
+// The same as TestUScvtfHelper, but convert to floats.
+static void TestUScvtf32Helper(uint64_t in,
+ uint32_t expected_scvtf_bits,
+ uint32_t expected_ucvtf_bits) {
+ uint64_t u64 = in;
+ uint32_t u32 = u64 & 0xffffffff;
+ int64_t s64 = static_cast<int64_t>(in);
+ int32_t s32 = s64 & 0x7fffffff;
+
+ bool cvtf_s32 = (s64 == s32);
+ bool cvtf_u32 = (u64 == u32);
+
+ float results_scvtf_x[65];
+ float results_ucvtf_x[65];
+ float results_scvtf_w[33];
+ float results_ucvtf_w[33];
+
+ SETUP();
+ START();
+
+ __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
+ __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
+ __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
+ __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
+
+ __ Mov(x10, s64);
+
+ // Corrupt the top word, in case it is accidentally used during W-register
+ // conversions.
+ __ Mov(x11, 0x5555555555555555);
+ __ Bfi(x11, x10, 0, kWRegSizeInBits);
+
+ // Test integer conversions.
+ __ Scvtf(s0, x10);
+ __ Ucvtf(s1, x10);
+ __ Scvtf(s2, w11);
+ __ Ucvtf(s3, w11);
+ __ Str(s0, MemOperand(x0));
+ __ Str(s1, MemOperand(x1));
+ __ Str(s2, MemOperand(x2));
+ __ Str(s3, MemOperand(x3));
+
+ // Test all possible values of fbits.
+ for (int fbits = 1; fbits <= 32; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Scvtf(s2, w11, fbits);
+ __ Ucvtf(s3, w11, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSize));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSize));
+ __ Str(s2, MemOperand(x2, fbits * kSRegSize));
+ __ Str(s3, MemOperand(x3, fbits * kSRegSize));
+ }
+
+ // Conversions from W registers can only handle fbits values <= 32, so just
+ // test conversions from X registers for 32 < fbits <= 64.
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ __ Scvtf(s0, x10, fbits);
+ __ Ucvtf(s1, x10, fbits);
+ __ Str(s0, MemOperand(x0, fbits * kSRegSize));
+ __ Str(s1, MemOperand(x1, fbits * kSRegSize));
+ }
+
+ END();
+ RUN();
+
+ // Check the results.
+ float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
+ float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
+
+ for (int fbits = 0; fbits <= 32; fbits++) {
+ float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+ float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+ ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+ if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
+ if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
+ }
+ for (int fbits = 33; fbits <= 64; fbits++) {
+ float expected_scvtf = expected_scvtf_base / powf(2, fbits);
+ float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
+ ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+ ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(scvtf_ucvtf_float) {
+ INIT_V8();
+ // Simple conversions of positive numbers which require no rounding; the
+  // results should not depend on the rounding mode, and ucvtf and scvtf should
+ // produce the same result.
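+  // As in scvtf_ucvtf_double, the expected values are raw IEEE-754 bit
+  // patterns; for example, 0x3f800000 encodes 1.0f and 0x4e800000 encodes
+  // 2^30 as a float.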
+ TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
+ TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
+ TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
+ TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
+ // Test mantissa extremities.
+ TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
+ TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
+ // The largest int32_t that fits in a float.
+ TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
+ // Values that would be negative if treated as an int32_t.
+ TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
+ TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
+ // The largest int64_t that fits in a float.
+ TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
+ // Check for bit pattern reproduction.
+ TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
+
+ // Simple conversions of negative int64_t values. These require no rounding,
+ // and the results should not depend on the rounding mode.
+ TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
+ TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
+
+ // Conversions which require rounding.
+ TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
+ TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
+ TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
+ TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
+ TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
+ // Check rounding of negative int64_t values (and large uint64_t values).
+ TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
+ TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
+ TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
+ TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
+ TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
+ TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
+ TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
+ // Round up to produce a result that's too big for the input to represent.
+ TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
+ TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
+ TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
+ TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
+ TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
+ TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
+}
+
+
+TEST(system_mrs) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 1);
+ __ Mov(w2, 0x80000000);
+
+ // Set the Z and C flags.
+ __ Cmp(w0, w0);
+ __ Mrs(x3, NZCV);
+
+ // Set the N flag.
+ __ Cmp(w0, w1);
+ __ Mrs(x4, NZCV);
+
+ // Set the Z, C and V flags.
+ __ Adds(w0, w2, w2);
+ __ Mrs(x5, NZCV);
+
+ // Read the default FPCR.
+ __ Mrs(x6, FPCR);
+ END();
+
+ RUN();
+
+ // NZCV
+ ASSERT_EQUAL_32(ZCFlag, w3);
+ ASSERT_EQUAL_32(NFlag, w4);
+ ASSERT_EQUAL_32(ZCVFlag, w5);
+
+ // FPCR
+ // The default FPCR on Linux-based platforms is 0.
+ ASSERT_EQUAL_32(0, w6);
+
+ TEARDOWN();
+}
+
+
+TEST(system_msr) {
+ INIT_V8();
+ // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
+ const uint64_t fpcr_core = 0x07c00000;
+
+ // All FPCR fields (including fields which may be read-as-zero):
+ // Stride, Len
+ // IDE, IXE, UFE, OFE, DZE, IOE
+ const uint64_t fpcr_all = fpcr_core | 0x00379f00;
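+  // For reference: fpcr_core covers FPCR bits 26:22 (AHP = 26, DN = 25,
+  // FZ = 24, RMode = 23:22), and the extra 0x00379f00 covers Stride (21:20),
+  // Len (18:16), IDE (15) and IXE, UFE, OFE, DZE, IOE (bits 12 down to 8).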
+
+ SETUP();
+
+ START();
+ __ Mov(w0, 0);
+ __ Mov(w1, 0x7fffffff);
+
+ __ Mov(x7, 0);
+
+ __ Mov(x10, NVFlag);
+ __ Cmp(w0, w0); // Set Z and C.
+ __ Msr(NZCV, x10); // Set N and V.
+ // The Msr should have overwritten every flag set by the Cmp.
+ __ Cinc(x7, x7, mi); // N
+ __ Cinc(x7, x7, ne); // !Z
+ __ Cinc(x7, x7, lo); // !C
+ __ Cinc(x7, x7, vs); // V
+
+ __ Mov(x10, ZCFlag);
+ __ Cmn(w1, w1); // Set N and V.
+ __ Msr(NZCV, x10); // Set Z and C.
+ // The Msr should have overwritten every flag set by the Cmn.
+ __ Cinc(x7, x7, pl); // !N
+ __ Cinc(x7, x7, eq); // Z
+ __ Cinc(x7, x7, hs); // C
+ __ Cinc(x7, x7, vc); // !V
+
+ // All core FPCR fields must be writable.
+ __ Mov(x8, fpcr_core);
+ __ Msr(FPCR, x8);
+ __ Mrs(x8, FPCR);
+
+ // All FPCR fields, including optional ones. This part of the test doesn't
+ // achieve much other than ensuring that supported fields can be cleared by
+ // the next test.
+ __ Mov(x9, fpcr_all);
+ __ Msr(FPCR, x9);
+ __ Mrs(x9, FPCR);
+ __ And(x9, x9, fpcr_core);
+
+ // The undefined bits must ignore writes.
+ // It's conceivable that a future version of the architecture could use these
+ // fields (making this test fail), but in the meantime this is a useful test
+ // for the simulator.
+ __ Mov(x10, ~fpcr_all);
+ __ Msr(FPCR, x10);
+ __ Mrs(x10, FPCR);
+
+ END();
+
+ RUN();
+
+ // We should have incremented x7 (from 0) exactly 8 times.
+ ASSERT_EQUAL_64(8, x7);
+
+ ASSERT_EQUAL_64(fpcr_core, x8);
+ ASSERT_EQUAL_64(fpcr_core, x9);
+ ASSERT_EQUAL_64(0, x10);
+
+ TEARDOWN();
+}
+
+
+TEST(system_nop) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ before.Dump(&masm);
+ __ Nop();
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+ ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+ TEARDOWN();
+}
+
+
+TEST(zero_dest) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ // Preserve the system stack pointer, in case we clobber it.
+ __ Mov(x30, csp);
+ // Initialize the other registers used in this test.
+ uint64_t literal_base = 0x0100001000100101UL;
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (unsigned i = 2; i < x30.code(); i++) {
+ __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+ }
+ before.Dump(&masm);
+
+ // All of these instructions should be NOPs in these forms, but have
+ // alternate forms which can write into the stack pointer.
+ __ add(xzr, x0, x1);
+ __ add(xzr, x1, xzr);
+ __ add(xzr, xzr, x1);
+
+ __ and_(xzr, x0, x2);
+ __ and_(xzr, x2, xzr);
+ __ and_(xzr, xzr, x2);
+
+ __ bic(xzr, x0, x3);
+ __ bic(xzr, x3, xzr);
+ __ bic(xzr, xzr, x3);
+
+ __ eon(xzr, x0, x4);
+ __ eon(xzr, x4, xzr);
+ __ eon(xzr, xzr, x4);
+
+ __ eor(xzr, x0, x5);
+ __ eor(xzr, x5, xzr);
+ __ eor(xzr, xzr, x5);
+
+ __ orr(xzr, x0, x6);
+ __ orr(xzr, x6, xzr);
+ __ orr(xzr, xzr, x6);
+
+ __ sub(xzr, x0, x7);
+ __ sub(xzr, x7, xzr);
+ __ sub(xzr, xzr, x7);
+
+ // Swap the saved system stack pointer with the real one. If csp was written
+ // during the test, it will show up in x30. This is done because the test
+ // framework assumes that csp will be valid at the end of the test.
+ __ Mov(x29, x30);
+ __ Mov(x30, csp);
+ __ Mov(csp, x29);
+ // We used x29 as a scratch register, so reset it to make sure it doesn't
+ // trigger a test failure.
+ __ Add(x29, x28, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+ ASSERT_EQUAL_NZCV(before.flags_nzcv());
+
+ TEARDOWN();
+}
+
+
+TEST(zero_dest_setflags) {
+ INIT_V8();
+ SETUP();
+ RegisterDump before;
+
+ START();
+ // Preserve the system stack pointer, in case we clobber it.
+ __ Mov(x30, csp);
+ // Initialize the other registers used in this test.
+ uint64_t literal_base = 0x0100001000100101UL;
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (int i = 2; i < 30; i++) {
+ __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
+ }
+ before.Dump(&masm);
+
+ // All of these instructions should only write to the flags in these forms,
+ // but have alternate forms which can write into the stack pointer.
+ __ adds(xzr, x0, Operand(x1, UXTX));
+ __ adds(xzr, x1, Operand(xzr, UXTX));
+ __ adds(xzr, x1, 1234);
+ __ adds(xzr, x0, x1);
+ __ adds(xzr, x1, xzr);
+ __ adds(xzr, xzr, x1);
+
+ __ ands(xzr, x2, ~0xf);
+ __ ands(xzr, xzr, ~0xf);
+ __ ands(xzr, x0, x2);
+ __ ands(xzr, x2, xzr);
+ __ ands(xzr, xzr, x2);
+
+ __ bics(xzr, x3, ~0xf);
+ __ bics(xzr, xzr, ~0xf);
+ __ bics(xzr, x0, x3);
+ __ bics(xzr, x3, xzr);
+ __ bics(xzr, xzr, x3);
+
+ __ subs(xzr, x0, Operand(x3, UXTX));
+ __ subs(xzr, x3, Operand(xzr, UXTX));
+ __ subs(xzr, x3, 1234);
+ __ subs(xzr, x0, x3);
+ __ subs(xzr, x3, xzr);
+ __ subs(xzr, xzr, x3);
+
+ // Swap the saved system stack pointer with the real one. If csp was written
+ // during the test, it will show up in x30. This is done because the test
+ // framework assumes that csp will be valid at the end of the test.
+ __ Mov(x29, x30);
+ __ Mov(x30, csp);
+ __ Mov(csp, x29);
+ // We used x29 as a scratch register, so reset it to make sure it doesn't
+ // trigger a test failure.
+ __ Add(x29, x28, x1);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_REGISTERS(before);
+
+ TEARDOWN();
+}
+
+
+TEST(register_bit) {
+ // No code generation takes place in this test, so no need to setup and
+ // teardown.
+
+ // Simple tests.
+ CHECK(x0.Bit() == (1UL << 0));
+ CHECK(x1.Bit() == (1UL << 1));
+ CHECK(x10.Bit() == (1UL << 10));
+
+ // AAPCS64 definitions.
+ CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
+ CHECK(lr.Bit() == (1UL << kLinkRegCode));
+
+ // Fixed (hardware) definitions.
+ CHECK(xzr.Bit() == (1UL << kZeroRegCode));
+
+ // Internal ABI definitions.
+ CHECK(jssp.Bit() == (1UL << kJSSPCode));
+ CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
+ CHECK(csp.Bit() != xzr.Bit());
+
+ // xn.Bit() == wn.Bit() at all times, for the same n.
+ CHECK(x0.Bit() == w0.Bit());
+ CHECK(x1.Bit() == w1.Bit());
+ CHECK(x10.Bit() == w10.Bit());
+ CHECK(jssp.Bit() == wjssp.Bit());
+ CHECK(xzr.Bit() == wzr.Bit());
+ CHECK(csp.Bit() == wcsp.Bit());
+}
+
+
+TEST(stack_pointer_override) {
+ // This test generates some stack maintenance code, but the test only checks
+ // the reported state.
+ INIT_V8();
+ SETUP();
+ START();
+
+  // The default stack pointer in V8 is jssp, but for compatibility with the
+  // native C stack, the test framework sets it to csp before calling the
+  // test.
+ CHECK(csp.Is(__ StackPointer()));
+ __ SetStackPointer(x0);
+ CHECK(x0.Is(__ StackPointer()));
+ __ SetStackPointer(jssp);
+ CHECK(jssp.Is(__ StackPointer()));
+ __ SetStackPointer(csp);
+ CHECK(csp.Is(__ StackPointer()));
+
+ END();
+ RUN();
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_simple) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
+ static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
+ x12.Bit() | x13.Bit();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
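+  // For example, 3 * literal_base is 0x0300003000300303, so the multiplier is
+  // clearly visible in every byte group of the stored value.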
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+
+ __ Claim(4);
+
+ // Simple exchange.
+ // After this test:
+ // x0-x3 should be unchanged.
+ // w10-w13 should contain the lower words of x0-x3.
+ __ Poke(x0, 0);
+ __ Poke(x1, 8);
+ __ Poke(x2, 16);
+ __ Poke(x3, 24);
+ Clobber(&masm, x0_to_x3);
+ __ Peek(x0, 0);
+ __ Peek(x1, 8);
+ __ Peek(x2, 16);
+ __ Peek(x3, 24);
+
+ __ Poke(w0, 0);
+ __ Poke(w1, 4);
+ __ Poke(w2, 8);
+ __ Poke(w3, 12);
+ Clobber(&masm, x10_to_x13);
+ __ Peek(w10, 0);
+ __ Peek(w11, 4);
+ __ Peek(w12, 8);
+ __ Peek(w13, 12);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(literal_base * 1, x0);
+ ASSERT_EQUAL_64(literal_base * 2, x1);
+ ASSERT_EQUAL_64(literal_base * 3, x2);
+ ASSERT_EQUAL_64(literal_base * 4, x3);
+
+ ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+ ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+ ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+ ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_unaligned) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+ __ Add(x4, x3, x0);
+ __ Add(x5, x4, x0);
+ __ Add(x6, x5, x0);
+
+ __ Claim(4);
+
+ // Unaligned exchanges.
+ // After this test:
+ // x0-x6 should be unchanged.
+ // w10-w12 should contain the lower words of x0-x2.
+ __ Poke(x0, 1);
+ Clobber(&masm, x0.Bit());
+ __ Peek(x0, 1);
+ __ Poke(x1, 2);
+ Clobber(&masm, x1.Bit());
+ __ Peek(x1, 2);
+ __ Poke(x2, 3);
+ Clobber(&masm, x2.Bit());
+ __ Peek(x2, 3);
+ __ Poke(x3, 4);
+ Clobber(&masm, x3.Bit());
+ __ Peek(x3, 4);
+ __ Poke(x4, 5);
+ Clobber(&masm, x4.Bit());
+ __ Peek(x4, 5);
+ __ Poke(x5, 6);
+ Clobber(&masm, x5.Bit());
+ __ Peek(x5, 6);
+ __ Poke(x6, 7);
+ Clobber(&masm, x6.Bit());
+ __ Peek(x6, 7);
+
+ __ Poke(w0, 1);
+ Clobber(&masm, w10.Bit());
+ __ Peek(w10, 1);
+ __ Poke(w1, 2);
+ Clobber(&masm, w11.Bit());
+ __ Peek(w11, 2);
+ __ Poke(w2, 3);
+ Clobber(&masm, w12.Bit());
+ __ Peek(w12, 3);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(literal_base * 1, x0);
+ ASSERT_EQUAL_64(literal_base * 2, x1);
+ ASSERT_EQUAL_64(literal_base * 3, x2);
+ ASSERT_EQUAL_64(literal_base * 4, x3);
+ ASSERT_EQUAL_64(literal_base * 5, x4);
+ ASSERT_EQUAL_64(literal_base * 6, x5);
+ ASSERT_EQUAL_64(literal_base * 7, x6);
+
+ ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+ ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+ ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_endianness) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+
+ __ Claim(4);
+
+ // Endianness tests.
+ // After this section:
+ // x4 should match x0[31:0]:x0[63:32]
+ // w5 should match w1[15:0]:w1[31:16]
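+  // (On this little-endian layout, the Peek at offset 4 straddles the two
+  // copies of x0, so the high word of x0 lands in the low half of x4 and
+  // vice versa; similarly for the W-sized accesses and w5.)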
+ __ Poke(x0, 0);
+ __ Poke(x0, 8);
+ __ Peek(x4, 4);
+
+ __ Poke(w1, 0);
+ __ Poke(w1, 4);
+ __ Peek(w5, 2);
+
+ __ Drop(4);
+
+ END();
+ RUN();
+
+ uint64_t x0_expected = literal_base * 1;
+ uint64_t x1_expected = literal_base * 2;
+ uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
+ uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
+ ((x1_expected >> 16) & 0x0000ffff);
+
+ ASSERT_EQUAL_64(x0_expected, x0);
+ ASSERT_EQUAL_64(x1_expected, x1);
+ ASSERT_EQUAL_64(x4_expected, x4);
+ ASSERT_EQUAL_64(x5_expected, x5);
+
+ TEARDOWN();
+}
+
+
+TEST(peek_poke_mixed) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ // Initialize the registers.
+ __ Mov(x0, literal_base);
+ __ Add(x1, x0, x0);
+ __ Add(x2, x1, x0);
+ __ Add(x3, x2, x0);
+
+ __ Claim(4);
+
+ // Mix with other stack operations.
+ // After this section:
+ // x0-x3 should be unchanged.
+ // x6 should match x1[31:0]:x0[63:32]
+ // w7 should match x1[15:0]:x0[63:48]
+ __ Poke(x1, 8);
+ __ Poke(x0, 0);
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(x4, __ StackPointer());
+ __ SetStackPointer(x4);
+
+ __ Poke(wzr, 0); // Clobber the space we're about to drop.
+ __ Drop(1, kWRegSize);
+ __ Peek(x6, 0);
+ __ Claim(1);
+ __ Peek(w7, 10);
+ __ Poke(x3, 28);
+ __ Poke(xzr, 0); // Clobber the space we're about to drop.
+ __ Drop(1);
+ __ Poke(x2, 12);
+ __ Push(w0);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ __ Pop(x0, x1, x2, x3);
+
+ END();
+ RUN();
+
+ uint64_t x0_expected = literal_base * 1;
+ uint64_t x1_expected = literal_base * 2;
+ uint64_t x2_expected = literal_base * 3;
+ uint64_t x3_expected = literal_base * 4;
+ uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
+ uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
+ ((x0_expected >> 48) & 0x0000ffff);
+
+ ASSERT_EQUAL_64(x0_expected, x0);
+ ASSERT_EQUAL_64(x1_expected, x1);
+ ASSERT_EQUAL_64(x2_expected, x2);
+ ASSERT_EQUAL_64(x3_expected, x3);
+ ASSERT_EQUAL_64(x6_expected, x6);
+ ASSERT_EQUAL_64(x7_expected, x7);
+
+ TEARDOWN();
+}
+
+
+// This enum is used only as an argument to the push-pop test helpers.
+enum PushPopMethod {
+ // Push or Pop using the Push and Pop methods, with blocks of up to four
+ // registers. (Smaller blocks will be used if necessary.)
+ PushPopByFour,
+
+ // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
+ PushPopRegList
+};
+
+
+// The maximum number of registers that can be used by the PushPopJssp* tests,
+// where a reg_count field is provided.
+static int const kPushPopJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+// * Claim <claim> bytes to set the stack alignment.
+// * Push <reg_count> registers with size <reg_size>.
+// * Clobber the register contents.
+// * Pop <reg_count> registers to restore the original contents.
+// * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
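+//
+// For example, PushPopJsspSimpleHelper(4, 0, kXRegSizeInBits, PushPopByFour,
+// PushPopRegList) pushes four X registers with a single Push() call and then
+// restores them with a single PopSizeRegList() call.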
+static void PushPopJsspSimpleHelper(int reg_count,
+ int claim,
+ int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
+ SETUP();
+
+ START();
+
+ // Registers x8 and x9 are used by the macro assembler for debug code (for
+ // example in 'Pop'), so we can't use them here. We can't use jssp because it
+ // will be the stack pointer for this test.
+ static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+ if (reg_count == kPushPopJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfRegisters);
+ }
+ // Work out which registers to use, based on reg_size.
+ Register r[kNumberOfRegisters];
+ Register x[kNumberOfRegisters];
+ RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
+ allowed);
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ int i;
+
+ // Initialize the registers.
+ for (i = 0; i < reg_count; i++) {
+ // Always write into the X register, to ensure that the upper word is
+ // properly ignored by Push when testing W registers.
+ if (!x[i].IsZero()) {
+ __ Mov(x[i], literal_base * i);
+ }
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ switch (push_method) {
+ case PushPopByFour:
+ // Push high-numbered registers first (to the highest addresses).
+ for (i = reg_count; i >= 4; i -= 4) {
+ __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
+ }
+ // Finish off the leftovers.
+ switch (i) {
+ case 3: __ Push(r[2], r[1], r[0]); break;
+ case 2: __ Push(r[1], r[0]); break;
+ case 1: __ Push(r[0]); break;
+ default: ASSERT(i == 0); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PushSizeRegList(list, reg_size);
+ break;
+ }
+
+ // Clobber all the registers, to ensure that they get repopulated by Pop.
+ Clobber(&masm, list);
+
+ switch (pop_method) {
+ case PushPopByFour:
+ // Pop low-numbered registers first (from the lowest addresses).
+ for (i = 0; i <= (reg_count-4); i += 4) {
+ __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
+ }
+ // Finish off the leftovers.
+ switch (reg_count - i) {
+ case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
+ case 2: __ Pop(r[i], r[i+1]); break;
+ case 1: __ Pop(r[i]); break;
+ default: ASSERT(i == reg_count); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PopSizeRegList(list, reg_size);
+ break;
+ }
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Check that the register contents were preserved.
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+ // that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ for (int i = 0; i < reg_count; i++) {
+ if (x[i].IsZero()) {
+ ASSERT_EQUAL_64(0, x[i]);
+ } else {
+ ASSERT_EQUAL_64(literal_base * i, x[i]);
+ }
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_jssp_simple_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+TEST(push_pop_jssp_simple_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+// The maximum number of registers that can be used by the PushPopFPJssp* tests,
+// where a reg_count field is provided.
+static int const kPushPopFPJsspMaxRegCount = -1;
+
+// Test a simple push-pop pattern:
+// * Claim <claim> bytes to set the stack alignment.
+// * Push <reg_count> FP registers with size <reg_size>.
+// * Clobber the register contents.
+// * Pop <reg_count> FP registers to restore the original contents.
+// * Drop <claim> bytes to restore the original stack pointer.
+//
+// Different push and pop methods can be specified independently to test for
+// proper word-endian behaviour.
+static void PushPopFPJsspSimpleHelper(int reg_count,
+ int claim,
+ int reg_size,
+ PushPopMethod push_method,
+ PushPopMethod pop_method) {
+ SETUP();
+
+ START();
+
+ // We can use any floating-point register. None of them are reserved for
+ // debug code, for example.
+ static RegList const allowed = ~0;
+ if (reg_count == kPushPopFPJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
+ }
+ // Work out which registers to use, based on reg_size.
+ FPRegister v[kNumberOfRegisters];
+ FPRegister d[kNumberOfRegisters];
+ RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
+ allowed);
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied (using an integer) by small values (such as a register
+ // index), this value is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ // * It is never a floating-point NaN, and will therefore always compare
+ // equal to itself.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ int i;
+
+ // Initialize the registers, using X registers to load the literal.
+ __ Mov(x0, 0);
+ __ Mov(x1, literal_base);
+ for (i = 0; i < reg_count; i++) {
+ // Always write into the D register, to ensure that the upper word is
+ // properly ignored by Push when testing S registers.
+ __ Fmov(d[i], x0);
+ // Calculate the next literal.
+ __ Add(x0, x0, x1);
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ switch (push_method) {
+ case PushPopByFour:
+ // Push high-numbered registers first (to the highest addresses).
+ for (i = reg_count; i >= 4; i -= 4) {
+ __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
+ }
+ // Finish off the leftovers.
+ switch (i) {
+ case 3: __ Push(v[2], v[1], v[0]); break;
+ case 2: __ Push(v[1], v[0]); break;
+ case 1: __ Push(v[0]); break;
+ default: ASSERT(i == 0); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
+ break;
+ }
+
+ // Clobber all the registers, to ensure that they get repopulated by Pop.
+ ClobberFP(&masm, list);
+
+ switch (pop_method) {
+ case PushPopByFour:
+ // Pop low-numbered registers first (from the lowest addresses).
+ for (i = 0; i <= (reg_count-4); i += 4) {
+ __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
+ }
+ // Finish off the leftovers.
+ switch (reg_count - i) {
+ case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
+ case 2: __ Pop(v[i], v[i+1]); break;
+ case 1: __ Pop(v[i]); break;
+ default: ASSERT(i == reg_count); break;
+ }
+ break;
+ case PushPopRegList:
+ __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
+ break;
+ }
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Check that the register contents were preserved.
+ // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
+ // test that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+ for (int i = 0; i < reg_count; i++) {
+ uint64_t literal = literal_base * i;
+ double expected;
+ memcpy(&expected, &literal, sizeof(expected));
+ ASSERT_EQUAL_FP64(expected, d[i]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_fp_jssp_simple_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+TEST(push_pop_fp_jssp_simple_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 0; count <= 8; count++) {
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+ // Test with the maximum number of registers.
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+ PushPopByFour, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+ PushPopByFour, PushPopRegList);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+ PushPopRegList, PushPopByFour);
+ PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
+ PushPopRegList, PushPopRegList);
+ }
+}
+
+
+// Push and pop data using an overlapping combination of Push/Pop and
+// RegList-based methods.
+static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
+ SETUP();
+
+ // Registers x8 and x9 are used by the macro assembler for debug code (for
+ // example in 'Pop'), so we can't use them here. We can't use jssp because it
+ // will be the stack pointer for this test.
+ static RegList const allowed =
+ ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
+ // Work out which registers to use, based on reg_size.
+ Register r[10];
+ Register x[10];
+ PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
+
+ // Calculate some handy register lists.
+ RegList r0_to_r3 = 0;
+ for (int i = 0; i <= 3; i++) {
+ r0_to_r3 |= x[i].Bit();
+ }
+ RegList r4_to_r5 = 0;
+ for (int i = 4; i <= 5; i++) {
+ r4_to_r5 |= x[i].Bit();
+ }
+ RegList r6_to_r9 = 0;
+ for (int i = 6; i <= 9; i++) {
+ r6_to_r9 |= x[i].Bit();
+ }
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ uint64_t literal_base = 0x0100001000100101UL;
+
+ START();
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ __ Mov(x[3], literal_base * 3);
+ __ Mov(x[2], literal_base * 2);
+ __ Mov(x[1], literal_base * 1);
+ __ Mov(x[0], literal_base * 0);
+
+ __ PushSizeRegList(r0_to_r3, reg_size);
+ __ Push(r[3], r[2]);
+
+ Clobber(&masm, r0_to_r3);
+ __ PopSizeRegList(r0_to_r3, reg_size);
+
+ __ Push(r[2], r[1], r[3], r[0]);
+
+ Clobber(&masm, r4_to_r5);
+ __ Pop(r[4], r[5]);
+ Clobber(&masm, r6_to_r9);
+ __ Pop(r[6], r[7], r[8], r[9]);
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+ // that the upper word was properly cleared by Pop.
+ literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
+
+ ASSERT_EQUAL_64(literal_base * 3, x[9]);
+ ASSERT_EQUAL_64(literal_base * 2, x[8]);
+ ASSERT_EQUAL_64(literal_base * 0, x[7]);
+ ASSERT_EQUAL_64(literal_base * 3, x[6]);
+ ASSERT_EQUAL_64(literal_base * 1, x[5]);
+ ASSERT_EQUAL_64(literal_base * 2, x[4]);
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_jssp_mixed_methods_64) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
+ }
+}
+
+
+TEST(push_pop_jssp_mixed_methods_32) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
+ }
+}
+
+
+// Push and pop data using overlapping X- and W-sized quantities.
+static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
+ // This test emits rather a lot of code.
+ SETUP_SIZE(BUF_SIZE * 2);
+
+ // Work out which registers to use, based on reg_size.
+ Register tmp = x8;
+ static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
+ if (reg_count == kPushPopJsspMaxRegCount) {
+ reg_count = CountSetBits(allowed, kNumberOfRegisters);
+ }
+ Register w[kNumberOfRegisters];
+ Register x[kNumberOfRegisters];
+ RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
+
+ // The number of W-sized slots we expect to pop. When we pop, we alternate
+ // between W and X registers, so we need reg_count*1.5 W-sized slots.
+ int const requested_w_slots = reg_count + reg_count / 2;
+
+ // Track what _should_ be on the stack, using W-sized slots.
+ static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
+ uint32_t stack[kMaxWSlots];
+ for (int i = 0; i < kMaxWSlots; i++) {
+ stack[i] = 0xdeadbeef;
+ }
+
+ // The literal base is chosen to have two useful properties:
+ // * When multiplied by small values (such as a register index), this value
+ // is clearly readable in the result.
+ // * The value is not formed from repeating fixed-size smaller values, so it
+ // can be used to detect endianness-related errors.
+ static uint64_t const literal_base = 0x0100001000100101UL;
+ static uint64_t const literal_base_hi = literal_base >> 32;
+ static uint64_t const literal_base_lo = literal_base & 0xffffffff;
+ static uint64_t const literal_base_w = literal_base & 0xffffffff;
+
+ START();
+ {
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ // Initialize the registers.
+ for (int i = 0; i < reg_count; i++) {
+ // Always write into the X register, to ensure that the upper word is
+ // properly ignored by Push when testing W registers.
+ if (!x[i].IsZero()) {
+ __ Mov(x[i], literal_base * i);
+ }
+ }
+
+ // Claim memory first, as requested.
+ __ Claim(claim, kByteSizeInBytes);
+
+ // The push-pop pattern is as follows:
+ // Push: Pop:
+ // x[0](hi) -> w[0]
+ // x[0](lo) -> x[1](hi)
+ // w[1] -> x[1](lo)
+ // w[1] -> w[2]
+ // x[2](hi) -> x[2](hi)
+ // x[2](lo) -> x[2](lo)
+ // x[2](hi) -> w[3]
+ // x[2](lo) -> x[4](hi)
+ // x[2](hi) -> x[4](lo)
+ // x[2](lo) -> w[5]
+ // w[3] -> x[5](hi)
+ // w[3] -> x[6](lo)
+ // w[3] -> w[7]
+ // w[3] -> x[8](hi)
+ // x[4](hi) -> x[8](lo)
+ // x[4](lo) -> w[9]
+ // ... pattern continues ...
+ //
+ // That is, registers are pushed starting with the lower numbers,
+ // alternating between x and w registers, and pushing i%4+1 copies of each,
+ // where i is the register number.
+ // Registers are popped starting with the higher numbers one-by-one,
+ // alternating between x and w registers, but only popping one at a time.
+ //
+ // This pattern provides a wide variety of alignment effects and overlaps.
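+    //
+    // As a rough illustration, with reg_count == 4: requested_w_slots is 6,
+    // the push loop emits x[0] once (2 W slots), w[1] twice (2 slots) and
+    // x[2] three times (6 slots), the 4 surplus W slots are dropped, and the
+    // pops (x[3], w[2], x[1], w[0]) then consume the remaining 6 slots.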
+
+ // ---- Push ----
+
+ int active_w_slots = 0;
+ for (int i = 0; active_w_slots < requested_w_slots; i++) {
+ ASSERT(i < reg_count);
+ // In order to test various arguments to PushMultipleTimes, and to try to
+ // exercise different alignment and overlap effects, we push each
+ // register a different number of times.
+ int times = i % 4 + 1;
+ if (i & 1) {
+ // Push odd-numbered registers as W registers.
+ if (i & 2) {
+ __ PushMultipleTimes(w[i], times);
+ } else {
+ // Use a register to specify the count.
+ __ Mov(tmp.W(), times);
+ __ PushMultipleTimes(w[i], tmp.W());
+ }
+ // Fill in the expected stack slots.
+ for (int j = 0; j < times; j++) {
+ if (w[i].Is(wzr)) {
+ // The zero register always writes zeroes.
+ stack[active_w_slots++] = 0;
+ } else {
+ stack[active_w_slots++] = literal_base_w * i;
+ }
+ }
+ } else {
+ // Push even-numbered registers as X registers.
+ if (i & 2) {
+ __ PushMultipleTimes(x[i], times);
+ } else {
+ // Use a register to specify the count.
+ __ Mov(tmp, times);
+ __ PushMultipleTimes(x[i], tmp);
+ }
+ // Fill in the expected stack slots.
+ for (int j = 0; j < times; j++) {
+ if (x[i].IsZero()) {
+ // The zero register always writes zeroes.
+ stack[active_w_slots++] = 0;
+ stack[active_w_slots++] = 0;
+ } else {
+ stack[active_w_slots++] = literal_base_hi * i;
+ stack[active_w_slots++] = literal_base_lo * i;
+ }
+ }
+ }
+ }
+ // Because we were pushing several registers at a time, we probably pushed
+ // more than we needed to.
+ if (active_w_slots > requested_w_slots) {
+ __ Drop(active_w_slots - requested_w_slots, kWRegSize);
+ // Bump the number of active W-sized slots back to where it should be,
+ // and fill the empty space with a dummy value.
+ do {
+ stack[active_w_slots--] = 0xdeadbeef;
+ } while (active_w_slots > requested_w_slots);
+ }
+
+ // ---- Pop ----
+
+ Clobber(&masm, list);
+
+ // If popping an even number of registers, the first one will be X-sized.
+ // Otherwise, the first one will be W-sized.
+ bool next_is_64 = !(reg_count & 1);
+ for (int i = reg_count-1; i >= 0; i--) {
+ if (next_is_64) {
+ __ Pop(x[i]);
+ active_w_slots -= 2;
+ } else {
+ __ Pop(w[i]);
+ active_w_slots -= 1;
+ }
+ next_is_64 = !next_is_64;
+ }
+ ASSERT(active_w_slots == 0);
+
+ // Drop memory to restore jssp.
+ __ Drop(claim, kByteSizeInBytes);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+ }
+
+ END();
+
+ RUN();
+
+ int slot = 0;
+ for (int i = 0; i < reg_count; i++) {
+ // Even-numbered registers were written as W registers.
+ // Odd-numbered registers were written as X registers.
+ bool expect_64 = (i & 1);
+ uint64_t expected;
+
+ if (expect_64) {
+ uint64_t hi = stack[slot++];
+ uint64_t lo = stack[slot++];
+ expected = (hi << 32) | lo;
+ } else {
+ expected = stack[slot++];
+ }
+
+ // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
+ // test that the upper word was properly cleared by Pop.
+ if (x[i].IsZero()) {
+ ASSERT_EQUAL_64(0, x[i]);
+ } else {
+ ASSERT_EQUAL_64(expected, x[i]);
+ }
+ }
+ ASSERT(slot == requested_w_slots);
+
+ TEARDOWN();
+}
+
+
+TEST(push_pop_jssp_wx_overlap) {
+ INIT_V8();
+ for (int claim = 0; claim <= 8; claim++) {
+ for (int count = 1; count <= 8; count++) {
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ PushPopJsspWXOverlapHelper(count, claim);
+ }
+ // Test with the maximum number of registers.
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
+ }
+}
+
+
+TEST(push_pop_csp) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ ASSERT(csp.Is(__ StackPointer()));
+
+ __ Mov(x3, 0x3333333333333333UL);
+ __ Mov(x2, 0x2222222222222222UL);
+ __ Mov(x1, 0x1111111111111111UL);
+ __ Mov(x0, 0x0000000000000000UL);
+ __ Claim(2);
+ __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+ __ Push(x3, x2);
+ __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
+ __ Push(x2, x1, x3, x0);
+ __ Pop(x4, x5);
+ __ Pop(x6, x7, x8, x9);
+
+ __ Claim(2);
+ __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
+ __ Push(w3, w1, w2, w0);
+ __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
+ __ Pop(w14, w15, w16, w17);
+
+ __ Claim(2);
+ __ Push(w2, w2, w1, w1);
+ __ Push(x3, x3);
+ __ Pop(w18, w19, w20, w21);
+ __ Pop(x22, x23);
+
+ __ Claim(2);
+ __ PushXRegList(x1.Bit() | x22.Bit());
+ __ PopXRegList(x24.Bit() | x26.Bit());
+
+ __ Claim(2);
+ __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
+ __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
+
+ __ Claim(2);
+ __ PushXRegList(0);
+ __ PopXRegList(0);
+ __ PushXRegList(0xffffffff);
+ __ PopXRegList(0xffffffff);
+ __ Drop(12);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1111111111111111UL, x3);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x2);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x1);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x0);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x9);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x8);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x7);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x6);
+ ASSERT_EQUAL_64(0x1111111111111111UL, x5);
+ ASSERT_EQUAL_64(0x2222222222222222UL, x4);
+
+ ASSERT_EQUAL_32(0x11111111U, w13);
+ ASSERT_EQUAL_32(0x33333333U, w12);
+ ASSERT_EQUAL_32(0x00000000U, w11);
+ ASSERT_EQUAL_32(0x22222222U, w10);
+ ASSERT_EQUAL_32(0x11111111U, w17);
+ ASSERT_EQUAL_32(0x00000000U, w16);
+ ASSERT_EQUAL_32(0x33333333U, w15);
+ ASSERT_EQUAL_32(0x22222222U, w14);
+
+ ASSERT_EQUAL_32(0x11111111U, w18);
+ ASSERT_EQUAL_32(0x11111111U, w19);
+ ASSERT_EQUAL_32(0x11111111U, w20);
+ ASSERT_EQUAL_32(0x11111111U, w21);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x22);
+ ASSERT_EQUAL_64(0x0000000000000000UL, x23);
+
+ ASSERT_EQUAL_64(0x3333333333333333UL, x24);
+ ASSERT_EQUAL_64(0x3333333333333333UL, x26);
+
+ ASSERT_EQUAL_32(0x33333333U, w25);
+ ASSERT_EQUAL_32(0x00000000U, w27);
+ ASSERT_EQUAL_32(0x22222222U, w28);
+ ASSERT_EQUAL_32(0x33333333U, w29);
+ TEARDOWN();
+}
+
+
+TEST(push_queued) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ MacroAssembler::PushPopQueue queue(&masm);
+
+ // Queue up registers.
+ queue.Queue(x0);
+ queue.Queue(x1);
+ queue.Queue(x2);
+ queue.Queue(x3);
+
+ queue.Queue(w4);
+ queue.Queue(w5);
+ queue.Queue(w6);
+
+ queue.Queue(d0);
+ queue.Queue(d1);
+
+ queue.Queue(s2);
+
+ __ Mov(x0, 0x1234000000000000);
+ __ Mov(x1, 0x1234000100010001);
+ __ Mov(x2, 0x1234000200020002);
+ __ Mov(x3, 0x1234000300030003);
+ __ Mov(w4, 0x12340004);
+ __ Mov(w5, 0x12340005);
+ __ Mov(w6, 0x12340006);
+ __ Fmov(d0, 123400.0);
+ __ Fmov(d1, 123401.0);
+ __ Fmov(s2, 123402.0);
+
+ // Actually push them.
+ queue.PushQueued();
+
+ Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
+ Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
+
+ // Pop them conventionally.
+ __ Pop(s2);
+ __ Pop(d1, d0);
+ __ Pop(w6, w5, w4);
+ __ Pop(x3, x2, x1, x0);
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234000000000000, x0);
+ ASSERT_EQUAL_64(0x1234000100010001, x1);
+ ASSERT_EQUAL_64(0x1234000200020002, x2);
+ ASSERT_EQUAL_64(0x1234000300030003, x3);
+
+ ASSERT_EQUAL_32(0x12340004, w4);
+ ASSERT_EQUAL_32(0x12340005, w5);
+ ASSERT_EQUAL_32(0x12340006, w6);
+
+ ASSERT_EQUAL_FP64(123400.0, d0);
+ ASSERT_EQUAL_FP64(123401.0, d1);
+
+ ASSERT_EQUAL_FP32(123402.0, s2);
+
+ TEARDOWN();
+}
+
+
+TEST(pop_queued) {
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ ASSERT(__ StackPointer().Is(csp));
+ __ Mov(jssp, __ StackPointer());
+ __ SetStackPointer(jssp);
+
+ MacroAssembler::PushPopQueue queue(&masm);
+
+ __ Mov(x0, 0x1234000000000000);
+ __ Mov(x1, 0x1234000100010001);
+ __ Mov(x2, 0x1234000200020002);
+ __ Mov(x3, 0x1234000300030003);
+ __ Mov(w4, 0x12340004);
+ __ Mov(w5, 0x12340005);
+ __ Mov(w6, 0x12340006);
+ __ Fmov(d0, 123400.0);
+ __ Fmov(d1, 123401.0);
+ __ Fmov(s2, 123402.0);
+
+ // Push registers conventionally.
+ __ Push(x0, x1, x2, x3);
+ __ Push(w4, w5, w6);
+ __ Push(d0, d1);
+ __ Push(s2);
+
+ // Queue up a pop.
+ queue.Queue(s2);
+
+ queue.Queue(d1);
+ queue.Queue(d0);
+
+ queue.Queue(w6);
+ queue.Queue(w5);
+ queue.Queue(w4);
+
+ queue.Queue(x3);
+ queue.Queue(x2);
+ queue.Queue(x1);
+ queue.Queue(x0);
+
+ Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
+ Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
+
+ // Actually pop them.
+ queue.PopQueued();
+
+ __ Mov(csp, __ StackPointer());
+ __ SetStackPointer(csp);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1234000000000000, x0);
+ ASSERT_EQUAL_64(0x1234000100010001, x1);
+ ASSERT_EQUAL_64(0x1234000200020002, x2);
+ ASSERT_EQUAL_64(0x1234000300030003, x3);
+
+ ASSERT_EQUAL_64(0x0000000012340004, x4);
+ ASSERT_EQUAL_64(0x0000000012340005, x5);
+ ASSERT_EQUAL_64(0x0000000012340006, x6);
+
+ ASSERT_EQUAL_FP64(123400.0, d0);
+ ASSERT_EQUAL_FP64(123401.0, d1);
+
+ ASSERT_EQUAL_FP32(123402.0, s2);
+
+ TEARDOWN();
+}
+
+
+TEST(jump_both_smi) {
+ INIT_V8();
+ SETUP();
+
+ Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+ Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+ Label return1, return2, return3, done;
+
+ START();
+
+ __ Mov(x0, 0x5555555500000001UL); // A pointer.
+ __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x2, 0x1234567800000000UL); // A smi.
+ __ Mov(x3, 0x8765432100000000UL); // A smi.
+ __ Mov(x4, 0xdead);
+ __ Mov(x5, 0xdead);
+ __ Mov(x6, 0xdead);
+ __ Mov(x7, 0xdead);
+
+ __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+ __ Bind(&return1);
+ __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+ __ Bind(&return2);
+ __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+ __ Bind(&return3);
+ __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+ __ Bind(&cond_fail_00);
+ __ Mov(x4, 0);
+ __ B(&return1);
+ __ Bind(&cond_pass_00);
+ __ Mov(x4, 1);
+ __ B(&return1);
+
+ __ Bind(&cond_fail_01);
+ __ Mov(x5, 0);
+ __ B(&return2);
+ __ Bind(&cond_pass_01);
+ __ Mov(x5, 1);
+ __ B(&return2);
+
+ __ Bind(&cond_fail_10);
+ __ Mov(x6, 0);
+ __ B(&return3);
+ __ Bind(&cond_pass_10);
+ __ Mov(x6, 1);
+ __ B(&return3);
+
+ __ Bind(&cond_fail_11);
+ __ Mov(x7, 0);
+ __ B(&done);
+ __ Bind(&cond_pass_11);
+ __ Mov(x7, 1);
+
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+ ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+ ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(0, x5);
+ ASSERT_EQUAL_64(0, x6);
+ ASSERT_EQUAL_64(1, x7);
+
+ TEARDOWN();
+}
+
+
+TEST(jump_either_smi) {
+ INIT_V8();
+ SETUP();
+
+ Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
+ Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
+ Label return1, return2, return3, done;
+
+ START();
+
+ __ Mov(x0, 0x5555555500000001UL); // A pointer.
+ __ Mov(x1, 0xaaaaaaaa00000001UL); // A pointer.
+ __ Mov(x2, 0x1234567800000000UL); // A smi.
+ __ Mov(x3, 0x8765432100000000UL); // A smi.
+ __ Mov(x4, 0xdead);
+ __ Mov(x5, 0xdead);
+ __ Mov(x6, 0xdead);
+ __ Mov(x7, 0xdead);
+
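+ // Only the (x0, x1) pair contains no smi, so only the first check should take the 'fail' path.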
+ __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
+ __ Bind(&return1);
+ __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
+ __ Bind(&return2);
+ __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
+ __ Bind(&return3);
+ __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
+
+ __ Bind(&cond_fail_00);
+ __ Mov(x4, 0);
+ __ B(&return1);
+ __ Bind(&cond_pass_00);
+ __ Mov(x4, 1);
+ __ B(&return1);
+
+ __ Bind(&cond_fail_01);
+ __ Mov(x5, 0);
+ __ B(&return2);
+ __ Bind(&cond_pass_01);
+ __ Mov(x5, 1);
+ __ B(&return2);
+
+ __ Bind(&cond_fail_10);
+ __ Mov(x6, 0);
+ __ B(&return3);
+ __ Bind(&cond_pass_10);
+ __ Mov(x6, 1);
+ __ B(&return3);
+
+ __ Bind(&cond_fail_11);
+ __ Mov(x7, 0);
+ __ B(&done);
+ __ Bind(&cond_pass_11);
+ __ Mov(x7, 1);
+
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x5555555500000001UL, x0);
+ ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+ ASSERT_EQUAL_64(0x1234567800000000UL, x2);
+ ASSERT_EQUAL_64(0x8765432100000000UL, x3);
+ ASSERT_EQUAL_64(0, x4);
+ ASSERT_EQUAL_64(1, x5);
+ ASSERT_EQUAL_64(1, x6);
+ ASSERT_EQUAL_64(1, x7);
+
+ TEARDOWN();
+}
+
+
+TEST(noreg) {
+ // This test doesn't generate any code, but it verifies some invariants
+ // related to NoReg.
+ CHECK(NoReg.Is(NoFPReg));
+ CHECK(NoFPReg.Is(NoReg));
+ CHECK(NoReg.Is(NoCPUReg));
+ CHECK(NoCPUReg.Is(NoReg));
+ CHECK(NoFPReg.Is(NoCPUReg));
+ CHECK(NoCPUReg.Is(NoFPReg));
+
+ CHECK(NoReg.IsNone());
+ CHECK(NoFPReg.IsNone());
+ CHECK(NoCPUReg.IsNone());
+}
+
+
+TEST(isvalid) {
+ // This test doesn't generate any code, but it verifies some invariants
+ // related to IsValid().
+ CHECK(!NoReg.IsValid());
+ CHECK(!NoFPReg.IsValid());
+ CHECK(!NoCPUReg.IsValid());
+
+ CHECK(x0.IsValid());
+ CHECK(w0.IsValid());
+ CHECK(x30.IsValid());
+ CHECK(w30.IsValid());
+ CHECK(xzr.IsValid());
+ CHECK(wzr.IsValid());
+
+ CHECK(csp.IsValid());
+ CHECK(wcsp.IsValid());
+
+ CHECK(d0.IsValid());
+ CHECK(s0.IsValid());
+ CHECK(d31.IsValid());
+ CHECK(s31.IsValid());
+
+ CHECK(x0.IsValidRegister());
+ CHECK(w0.IsValidRegister());
+ CHECK(xzr.IsValidRegister());
+ CHECK(wzr.IsValidRegister());
+ CHECK(csp.IsValidRegister());
+ CHECK(wcsp.IsValidRegister());
+ CHECK(!x0.IsValidFPRegister());
+ CHECK(!w0.IsValidFPRegister());
+ CHECK(!xzr.IsValidFPRegister());
+ CHECK(!wzr.IsValidFPRegister());
+ CHECK(!csp.IsValidFPRegister());
+ CHECK(!wcsp.IsValidFPRegister());
+
+ CHECK(d0.IsValidFPRegister());
+ CHECK(s0.IsValidFPRegister());
+ CHECK(!d0.IsValidRegister());
+ CHECK(!s0.IsValidRegister());
+
+ // Test the same as before, but using CPURegister types. This shouldn't make
+ // any difference.
+ CHECK(static_cast<CPURegister>(x0).IsValid());
+ CHECK(static_cast<CPURegister>(w0).IsValid());
+ CHECK(static_cast<CPURegister>(x30).IsValid());
+ CHECK(static_cast<CPURegister>(w30).IsValid());
+ CHECK(static_cast<CPURegister>(xzr).IsValid());
+ CHECK(static_cast<CPURegister>(wzr).IsValid());
+
+ CHECK(static_cast<CPURegister>(csp).IsValid());
+ CHECK(static_cast<CPURegister>(wcsp).IsValid());
+
+ CHECK(static_cast<CPURegister>(d0).IsValid());
+ CHECK(static_cast<CPURegister>(s0).IsValid());
+ CHECK(static_cast<CPURegister>(d31).IsValid());
+ CHECK(static_cast<CPURegister>(s31).IsValid());
+
+ CHECK(static_cast<CPURegister>(x0).IsValidRegister());
+ CHECK(static_cast<CPURegister>(w0).IsValidRegister());
+ CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
+ CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
+ CHECK(static_cast<CPURegister>(csp).IsValidRegister());
+ CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
+ CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
+
+ CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
+ CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
+ CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
+ CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
+}
+
+
+TEST(cpureglist_utils_x) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of X registers.
+ CPURegList test(x0, x1, x2, x3);
+
+ CHECK(test.IncludesAliasOf(x0));
+ CHECK(test.IncludesAliasOf(x1));
+ CHECK(test.IncludesAliasOf(x2));
+ CHECK(test.IncludesAliasOf(x3));
+ CHECK(test.IncludesAliasOf(w0));
+ CHECK(test.IncludesAliasOf(w1));
+ CHECK(test.IncludesAliasOf(w2));
+ CHECK(test.IncludesAliasOf(w3));
+
+ CHECK(!test.IncludesAliasOf(x4));
+ CHECK(!test.IncludesAliasOf(x30));
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(w4));
+ CHECK(!test.IncludesAliasOf(w30));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IncludesAliasOf(d0));
+ CHECK(!test.IncludesAliasOf(d1));
+ CHECK(!test.IncludesAliasOf(d2));
+ CHECK(!test.IncludesAliasOf(d3));
+ CHECK(!test.IncludesAliasOf(s0));
+ CHECK(!test.IncludesAliasOf(s1));
+ CHECK(!test.IncludesAliasOf(s2));
+ CHECK(!test.IncludesAliasOf(s3));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == x0.type());
+
+ CHECK(test.PopHighestIndex().Is(x3));
+ CHECK(test.PopLowestIndex().Is(x0));
+
+ CHECK(test.IncludesAliasOf(x1));
+ CHECK(test.IncludesAliasOf(x2));
+ CHECK(test.IncludesAliasOf(w1));
+ CHECK(test.IncludesAliasOf(w2));
+ CHECK(!test.IncludesAliasOf(x0));
+ CHECK(!test.IncludesAliasOf(x3));
+ CHECK(!test.IncludesAliasOf(w0));
+ CHECK(!test.IncludesAliasOf(w3));
+
+ CHECK(test.PopHighestIndex().Is(x2));
+ CHECK(test.PopLowestIndex().Is(x1));
+
+ CHECK(!test.IncludesAliasOf(x1));
+ CHECK(!test.IncludesAliasOf(x2));
+ CHECK(!test.IncludesAliasOf(w1));
+ CHECK(!test.IncludesAliasOf(w2));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_w) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of W registers.
+ CPURegList test(w10, w11, w12, w13);
+
+ CHECK(test.IncludesAliasOf(x10));
+ CHECK(test.IncludesAliasOf(x11));
+ CHECK(test.IncludesAliasOf(x12));
+ CHECK(test.IncludesAliasOf(x13));
+ CHECK(test.IncludesAliasOf(w10));
+ CHECK(test.IncludesAliasOf(w11));
+ CHECK(test.IncludesAliasOf(w12));
+ CHECK(test.IncludesAliasOf(w13));
+
+ CHECK(!test.IncludesAliasOf(x0));
+ CHECK(!test.IncludesAliasOf(x9));
+ CHECK(!test.IncludesAliasOf(x14));
+ CHECK(!test.IncludesAliasOf(x30));
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(w0));
+ CHECK(!test.IncludesAliasOf(w9));
+ CHECK(!test.IncludesAliasOf(w14));
+ CHECK(!test.IncludesAliasOf(w30));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IncludesAliasOf(d10));
+ CHECK(!test.IncludesAliasOf(d11));
+ CHECK(!test.IncludesAliasOf(d12));
+ CHECK(!test.IncludesAliasOf(d13));
+ CHECK(!test.IncludesAliasOf(s10));
+ CHECK(!test.IncludesAliasOf(s11));
+ CHECK(!test.IncludesAliasOf(s12));
+ CHECK(!test.IncludesAliasOf(s13));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == w10.type());
+
+ CHECK(test.PopHighestIndex().Is(w13));
+ CHECK(test.PopLowestIndex().Is(w10));
+
+ CHECK(test.IncludesAliasOf(x11));
+ CHECK(test.IncludesAliasOf(x12));
+ CHECK(test.IncludesAliasOf(w11));
+ CHECK(test.IncludesAliasOf(w12));
+ CHECK(!test.IncludesAliasOf(x10));
+ CHECK(!test.IncludesAliasOf(x13));
+ CHECK(!test.IncludesAliasOf(w10));
+ CHECK(!test.IncludesAliasOf(w13));
+
+ CHECK(test.PopHighestIndex().Is(w12));
+ CHECK(test.PopLowestIndex().Is(w11));
+
+ CHECK(!test.IncludesAliasOf(x11));
+ CHECK(!test.IncludesAliasOf(x12));
+ CHECK(!test.IncludesAliasOf(w11));
+ CHECK(!test.IncludesAliasOf(w12));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_d) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of D registers.
+ CPURegList test(d20, d21, d22, d23);
+
+ CHECK(test.IncludesAliasOf(d20));
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(d23));
+ CHECK(test.IncludesAliasOf(s20));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(test.IncludesAliasOf(s23));
+
+ CHECK(!test.IncludesAliasOf(d0));
+ CHECK(!test.IncludesAliasOf(d19));
+ CHECK(!test.IncludesAliasOf(d24));
+ CHECK(!test.IncludesAliasOf(d31));
+ CHECK(!test.IncludesAliasOf(s0));
+ CHECK(!test.IncludesAliasOf(s19));
+ CHECK(!test.IncludesAliasOf(s24));
+ CHECK(!test.IncludesAliasOf(s31));
+
+ CHECK(!test.IncludesAliasOf(x20));
+ CHECK(!test.IncludesAliasOf(x21));
+ CHECK(!test.IncludesAliasOf(x22));
+ CHECK(!test.IncludesAliasOf(x23));
+ CHECK(!test.IncludesAliasOf(w20));
+ CHECK(!test.IncludesAliasOf(w21));
+ CHECK(!test.IncludesAliasOf(w22));
+ CHECK(!test.IncludesAliasOf(w23));
+
+ CHECK(!test.IncludesAliasOf(xzr));
+ CHECK(!test.IncludesAliasOf(wzr));
+ CHECK(!test.IncludesAliasOf(csp));
+ CHECK(!test.IncludesAliasOf(wcsp));
+
+ CHECK(!test.IsEmpty());
+
+ CHECK(test.type() == d20.type());
+
+ CHECK(test.PopHighestIndex().Is(d23));
+ CHECK(test.PopLowestIndex().Is(d20));
+
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(!test.IncludesAliasOf(d20));
+ CHECK(!test.IncludesAliasOf(d23));
+ CHECK(!test.IncludesAliasOf(s20));
+ CHECK(!test.IncludesAliasOf(s23));
+
+ CHECK(test.PopHighestIndex().Is(d22));
+ CHECK(test.PopLowestIndex().Is(d21));
+
+ CHECK(!test.IncludesAliasOf(d21));
+ CHECK(!test.IncludesAliasOf(d22));
+ CHECK(!test.IncludesAliasOf(s21));
+ CHECK(!test.IncludesAliasOf(s22));
+
+ CHECK(test.IsEmpty());
+}
+
+
+TEST(cpureglist_utils_s) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test a list of S registers.
+ CPURegList test(s20, s21, s22, s23);
+
+ // The type and size mechanisms are already covered, so here we just test
+ // that lists of S registers alias individual D registers.
+
+ CHECK(test.IncludesAliasOf(d20));
+ CHECK(test.IncludesAliasOf(d21));
+ CHECK(test.IncludesAliasOf(d22));
+ CHECK(test.IncludesAliasOf(d23));
+ CHECK(test.IncludesAliasOf(s20));
+ CHECK(test.IncludesAliasOf(s21));
+ CHECK(test.IncludesAliasOf(s22));
+ CHECK(test.IncludesAliasOf(s23));
+}
+
+
+TEST(cpureglist_utils_empty) {
+ // This test doesn't generate any code, but it verifies the behaviour of
+ // the CPURegList utility methods.
+
+ // Test an empty list.
+ // Empty lists can have type and size properties. Check that we can create
+ // them, and that they are empty.
+ CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
+ CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
+ CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
+ CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+
+ CHECK(reg32.PopLowestIndex().IsNone());
+ CHECK(reg64.PopLowestIndex().IsNone());
+ CHECK(fpreg32.PopLowestIndex().IsNone());
+ CHECK(fpreg64.PopLowestIndex().IsNone());
+
+ CHECK(reg32.PopHighestIndex().IsNone());
+ CHECK(reg64.PopHighestIndex().IsNone());
+ CHECK(fpreg32.PopHighestIndex().IsNone());
+ CHECK(fpreg64.PopHighestIndex().IsNone());
+
+ CHECK(reg32.IsEmpty());
+ CHECK(reg64.IsEmpty());
+ CHECK(fpreg32.IsEmpty());
+ CHECK(fpreg64.IsEmpty());
+}
+
+
+TEST(printf) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+ RegisterDump before;
+
+ // Initialize x29 to the value of the stack pointer. We will use x29 as a
+ // temporary stack pointer later, and initializing it in this way allows the
+ // RegisterDump check to pass.
+ __ Mov(x29, __ StackPointer());
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ Fmov(d10, 42.0);
+
+ // Test with three arguments.
+ __ Mov(x10, 3);
+ __ Mov(x11, 40);
+ __ Mov(x12, 500);
+
+ // x8 and x9 are used by debug code in part of the macro assembler. However,
+ // Printf guarantees to preserve them (so we can use Printf in debug code),
+ // and we need to test that they are properly preserved. The above code
+ // shouldn't need to use them, but we initialize x8 and x9 last to be on the
+ // safe side. This test still assumes that none of the code from
+ // before->Dump() to the end of the test can clobber x8 or x9, so where
+ // possible we use the Assembler directly to be safe.
+ __ orr(x8, xzr, 0x8888888888888888);
+ __ orr(x9, xzr, 0x9999999999999999);
+
+ // Check that we don't clobber any registers, except those that we explicitly
+ // write results into.
+ before.Dump(&masm);
+
+ __ Printf(test_plain_string); // NOLINT(runtime/printf)
+ __ Printf("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Printf("d0: %f\n", d0);
+ __ Printf("Test %%s: %s\n", x2);
+ __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Printf("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Printf("%g\n", d10);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+ __ Printf("old_stack_pointer: 0x%016" PRIx64 "\n", old_stack_pointer);
+ __ mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
+
+ END();
+ RUN();
+
+ // We cannot easily test the output of the Printf sequences, and because
+ // Printf preserves all registers by default, we can't look at the number of
+ // bytes that were printed. However, the printf_no_preserve test should check
+ // that, and here we just test that we didn't clobber any registers.
+ ASSERT_EQUAL_REGISTERS(before);
+
+ TEARDOWN();
+}
+
+
+TEST(printf_no_preserve) {
+ INIT_V8();
+ SETUP();
+ START();
+
+ char const * test_plain_string = "Printf with no arguments.\n";
+ char const * test_substring = "'This is a substring.'";
+
+ __ PrintfNoPreserve(test_plain_string); // NOLINT(runtime/printf)
+ __ Mov(x19, x0);
+
+ // Test simple integer arguments.
+ __ Mov(x0, 1234);
+ __ Mov(x1, 0x1234);
+ __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
+ __ Mov(x20, x0);
+
+ // Test simple floating-point arguments.
+ __ Fmov(d0, 1.234);
+ __ PrintfNoPreserve("d0: %f\n", d0);
+ __ Mov(x21, x0);
+
+ // Test pointer (string) arguments.
+ __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
+ __ PrintfNoPreserve("Test %%s: %s\n", x2);
+ __ Mov(x22, x0);
+
+ // Test the maximum number of arguments, and sign extension.
+ __ Mov(w3, 0xffffffff);
+ __ Mov(w4, 0xffffffff);
+ __ Mov(x5, 0xffffffffffffffff);
+ __ Mov(x6, 0xffffffffffffffff);
+ __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
+ "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
+ w3, w4, x5, x6);
+ __ Mov(x23, x0);
+
+ __ Fmov(s1, 1.234);
+ __ Fmov(s2, 2.345);
+ __ Fmov(d3, 3.456);
+ __ Fmov(d4, 4.567);
+ __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
+ __ Mov(x24, x0);
+
+ // Test printing callee-saved registers.
+ __ Mov(x28, 0x123456789abcdef);
+ __ PrintfNoPreserve("0x%08" PRIx32 ", 0x%016" PRIx64 "\n", x28, x28);
+ __ Mov(x25, x0);
+
+ __ Fmov(d10, 42.0);
+ __ PrintfNoPreserve("%g\n", d10);
+ __ Mov(x26, x0);
+
+ // Test with a different stack pointer.
+ const Register old_stack_pointer = __ StackPointer();
+ __ Mov(x29, old_stack_pointer);
+ __ SetStackPointer(x29);
+
+ __ PrintfNoPreserve("old_stack_pointer: 0x%016" PRIx64 "\n",
+ old_stack_pointer);
+ __ Mov(x27, x0);
+
+ __ Mov(old_stack_pointer, __ StackPointer());
+ __ SetStackPointer(old_stack_pointer);
+
+ // Test with three arguments.
+ __ Mov(x3, 3);
+ __ Mov(x4, 40);
+ __ Mov(x5, 500);
+ __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
+ __ Mov(x28, x0);
+
+ END();
+ RUN();
+
+ // We cannot easily test the exact output of the Printf sequences, but we can
+ // use the return code to check that the string length was correct.
+
+ // Printf with no arguments.
+ ASSERT_EQUAL_64(strlen(test_plain_string), x19);
+ // x0: 1234, x1: 0x00001234
+ ASSERT_EQUAL_64(25, x20);
+ // d0: 1.234000
+ ASSERT_EQUAL_64(13, x21);
+ // Test %s: 'This is a substring.'
+ ASSERT_EQUAL_64(32, x22);
+ // w3(uint32): 4294967295
+ // w4(int32): -1
+ // x5(uint64): 18446744073709551615
+ // x6(int64): -1
+ ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
+ // %f: 1.234000
+ // %g: 2.345
+ // %e: 3.456000e+00
+ // %E: 4.567000E+00
+ ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
+ // 0x89abcdef, 0x0123456789abcdef
+ ASSERT_EQUAL_64(31, x25);
+ // 42
+ ASSERT_EQUAL_64(3, x26);
+ // old_stack_pointer: 0x00007fb037ae2370
+ // Note: This is an example value, but the field width is fixed here so the
+ // string length is still predictable.
+ ASSERT_EQUAL_64(38, x27);
+ // 3=3, 4=40, 5=500
+ ASSERT_EQUAL_64(17, x28);
+
+ TEARDOWN();
+}
+
+
+// This is a V8-specific test.
+static void CopyFieldsHelper(CPURegList temps) {
+ static const uint64_t kLiteralBase = 0x0100001000100101UL;
+ static const uint64_t src[] = {kLiteralBase * 1,
+ kLiteralBase * 2,
+ kLiteralBase * 3,
+ kLiteralBase * 4,
+ kLiteralBase * 5,
+ kLiteralBase * 6,
+ kLiteralBase * 7,
+ kLiteralBase * 8,
+ kLiteralBase * 9,
+ kLiteralBase * 10,
+ kLiteralBase * 11};
+ static const uint64_t src_tagged =
+ reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
+
+ static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
+ uint64_t* dst[kTestCount];
+ uint64_t dst_tagged[kTestCount];
+
+ // The first test will be to copy 0 fields. The destination (and source)
+ // should not be accessed in any way.
+ dst[0] = NULL;
+ dst_tagged[0] = kHeapObjectTag;
+
+ // Allocate memory for each of the other tests. Each test <n> will have <n> fields.
+ // This is intended to exercise as many paths in CopyFields as possible.
+ for (unsigned i = 1; i < kTestCount; i++) {
+ dst[i] = new uint64_t[i];
+ memset(dst[i], 0, i * sizeof(kLiteralBase));
+ dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
+ }
+
+ SETUP();
+ START();
+
+ __ Mov(x0, dst_tagged[0]);
+ __ Mov(x1, 0);
+ __ CopyFields(x0, x1, temps, 0);
+ for (unsigned i = 1; i < kTestCount; i++) {
+ __ Mov(x0, dst_tagged[i]);
+ __ Mov(x1, src_tagged);
+ __ CopyFields(x0, x1, temps, i);
+ }
+
+ END();
+ RUN();
+ TEARDOWN();
+
+ for (unsigned i = 1; i < kTestCount; i++) {
+ for (unsigned j = 0; j < i; j++) {
+ CHECK(src[j] == dst[i][j]);
+ }
+ delete [] dst[i];
+ }
+}
+
+
+// This is a V8-specific test.
+TEST(copyfields) {
+ INIT_V8();
+ CopyFieldsHelper(CPURegList(x10));
+ CopyFieldsHelper(CPURegList(x10, x11));
+ CopyFieldsHelper(CPURegList(x10, x11, x12));
+ CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
+}
+
+
+static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
+ SETUP();
+
+ START();
+ Label end, slow;
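+ // x2 acts as a sentinel: it keeps 0xc001c0de unless the slow path is taken.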
+ __ Mov(x2, 0xc001c0de);
+ __ Mov(x1, value);
+ __ SmiTag(x1);
+ __ SmiAbs(x1, &slow);
+ __ SmiUntag(x1);
+ __ B(&end);
+
+ __ Bind(&slow);
+ __ Mov(x2, 0xbad);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ if (must_fail) {
+ // We tested an invalid conversion. The code must have jumped to the slow path.
+ ASSERT_EQUAL_64(0xbad, x2);
+ } else {
+ // The conversion is valid, check the result.
+ int32_t result = (value >= 0) ? value : -value;
+ ASSERT_EQUAL_64(result, x1);
+
+ // Check that we didn't jump on slow.
+ ASSERT_EQUAL_64(0xc001c0de, x2);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(smi_abs) {
+ INIT_V8();
+ // Simple and edge cases.
+ DoSmiAbsTest(0);
+ DoSmiAbsTest(0x12345);
+ DoSmiAbsTest(0x40000000);
+ DoSmiAbsTest(0x7fffffff);
+ DoSmiAbsTest(-1);
+ DoSmiAbsTest(-12345);
+ DoSmiAbsTest(0x80000001);
+
+ // Check that the most negative SMI is detected.
+ DoSmiAbsTest(0x80000000, true);
+}
+
+
+TEST(blr_lr) {
+ // A simple test to check that the simulator correctly handles "blr lr".
+ INIT_V8();
+ SETUP();
+
+ START();
+ Label target;
+ Label end;
+
+ __ Mov(x0, 0x0);
+ __ Adr(lr, &target);
+
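+ // Branch-and-link to the address in lr, which now points at 'target'.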
+ __ Blr(lr);
+ __ Mov(x0, 0xdeadbeef);
+ __ B(&end);
+
+ __ Bind(&target);
+ __ Mov(x0, 0xc001c0de);
+
+ __ Bind(&end);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0xc001c0de, x0);
+
+ TEARDOWN();
+}
+
+
+TEST(barriers) {
+ // Generate all supported barriers. This is just a smoke test.
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ // DMB
+ __ Dmb(FullSystem, BarrierAll);
+ __ Dmb(FullSystem, BarrierReads);
+ __ Dmb(FullSystem, BarrierWrites);
+ __ Dmb(FullSystem, BarrierOther);
+
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Dmb(InnerShareable, BarrierReads);
+ __ Dmb(InnerShareable, BarrierWrites);
+ __ Dmb(InnerShareable, BarrierOther);
+
+ __ Dmb(NonShareable, BarrierAll);
+ __ Dmb(NonShareable, BarrierReads);
+ __ Dmb(NonShareable, BarrierWrites);
+ __ Dmb(NonShareable, BarrierOther);
+
+ __ Dmb(OuterShareable, BarrierAll);
+ __ Dmb(OuterShareable, BarrierReads);
+ __ Dmb(OuterShareable, BarrierWrites);
+ __ Dmb(OuterShareable, BarrierOther);
+
+ // DSB
+ __ Dsb(FullSystem, BarrierAll);
+ __ Dsb(FullSystem, BarrierReads);
+ __ Dsb(FullSystem, BarrierWrites);
+ __ Dsb(FullSystem, BarrierOther);
+
+ __ Dsb(InnerShareable, BarrierAll);
+ __ Dsb(InnerShareable, BarrierReads);
+ __ Dsb(InnerShareable, BarrierWrites);
+ __ Dsb(InnerShareable, BarrierOther);
+
+ __ Dsb(NonShareable, BarrierAll);
+ __ Dsb(NonShareable, BarrierReads);
+ __ Dsb(NonShareable, BarrierWrites);
+ __ Dsb(NonShareable, BarrierOther);
+
+ __ Dsb(OuterShareable, BarrierAll);
+ __ Dsb(OuterShareable, BarrierReads);
+ __ Dsb(OuterShareable, BarrierWrites);
+ __ Dsb(OuterShareable, BarrierOther);
+
+ // ISB
+ __ Isb();
+
+ END();
+
+ RUN();
+
+ TEARDOWN();
+}
+
+
+TEST(process_nan_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(d0, sn);
+ __ Fmov(d10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(d1, d0);
+ __ Fabs(d2, d0);
+ __ Fneg(d3, d0);
+ // - Quiet NaN
+ __ Fmov(d11, d10);
+ __ Fabs(d12, d10);
+ __ Fneg(d13, d10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(d4, d0);
+ __ Frinta(d5, d0);
+ __ Frintn(d6, d0);
+ __ Frintz(d7, d0);
+ // - Quiet NaN
+ __ Fsqrt(d14, d10);
+ __ Frinta(d15, d10);
+ __ Frintn(d16, d10);
+ __ Frintz(d17, d10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint64_t qn_raw = double_to_rawbits(qn);
+ uint64_t sn_raw = double_to_rawbits(sn);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn, d1);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
+ ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn, d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
+ ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP64(sn_proc, d4);
+ ASSERT_EQUAL_FP64(sn_proc, d5);
+ ASSERT_EQUAL_FP64(sn_proc, d6);
+ ASSERT_EQUAL_FP64(sn_proc, d7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP64(qn_proc, d14);
+ ASSERT_EQUAL_FP64(qn_proc, d15);
+ ASSERT_EQUAL_FP64(qn_proc, d16);
+ ASSERT_EQUAL_FP64(qn_proc, d17);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nan_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float qn = rawbits_to_float(0x7fea1111);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsQuietNaN(qn));
+
+ // The input NaNs after passing through ProcessNaN.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float qn_proc = qn;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaN, and check that
+ // they all handle the NaN correctly.
+ __ Fmov(s0, sn);
+ __ Fmov(s10, qn);
+
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ // - Signalling NaN
+ __ Fmov(s1, s0);
+ __ Fabs(s2, s0);
+ __ Fneg(s3, s0);
+ // - Quiet NaN
+ __ Fmov(s11, s10);
+ __ Fabs(s12, s10);
+ __ Fneg(s13, s10);
+
+ // Operations that use ProcessNaN.
+ // - Signalling NaN
+ __ Fsqrt(s4, s0);
+ __ Frinta(s5, s0);
+ __ Frintn(s6, s0);
+ __ Frintz(s7, s0);
+ // - Quiet NaN
+ __ Fsqrt(s14, s10);
+ __ Frinta(s15, s10);
+ __ Frintn(s16, s10);
+ __ Frintz(s17, s10);
+
+ // The behaviour of fcvt is checked in TEST(fcvt_sd).
+
+ END();
+ RUN();
+
+ uint32_t qn_raw = float_to_rawbits(qn);
+ uint32_t sn_raw = float_to_rawbits(sn);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn, s1);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
+ ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn, s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
+ ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
+
+ // - Signalling NaN
+ ASSERT_EQUAL_FP32(sn_proc, s4);
+ ASSERT_EQUAL_FP32(sn_proc, s5);
+ ASSERT_EQUAL_FP32(sn_proc, s6);
+ ASSERT_EQUAL_FP32(sn_proc, s7);
+ // - Quiet NaN
+ ASSERT_EQUAL_FP32(qn_proc, s14);
+ ASSERT_EQUAL_FP32(qn_proc, s15);
+ ASSERT_EQUAL_FP32(qn_proc, s16);
+ ASSERT_EQUAL_FP32(qn_proc, s17);
+
+ TEARDOWN();
+}
+
+
+static void ProcessNaNsHelper(double n, double m, double expected) {
+ ASSERT(std::isnan(n) || std::isnan(m));
+ ASSERT(std::isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+
+ __ Fadd(d2, d0, d1);
+ __ Fsub(d3, d0, d1);
+ __ Fmul(d4, d0, d1);
+ __ Fdiv(d5, d0, d1);
+ __ Fmax(d6, d0, d1);
+ __ Fmin(d7, d0, d1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP64(expected, d2);
+ ASSERT_EQUAL_FP64(expected, d3);
+ ASSERT_EQUAL_FP64(expected, d4);
+ ASSERT_EQUAL_FP64(expected, d5);
+ ASSERT_EQUAL_FP64(expected, d6);
+ ASSERT_EQUAL_FP64(expected, d7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_double) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ double sn_proc = rawbits_to_double(0x7ffd555511111111);
+ double sm_proc = rawbits_to_double(0x7ffd555522222222);
+ double qn_proc = qn;
+ double qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+static void ProcessNaNsHelper(float n, float m, float expected) {
+ ASSERT(std::isnan(n) || std::isnan(m));
+ ASSERT(std::isnan(expected));
+
+ SETUP();
+ START();
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all propagate NaNs correctly.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+
+ __ Fadd(s2, s0, s1);
+ __ Fsub(s3, s0, s1);
+ __ Fmul(s4, s0, s1);
+ __ Fdiv(s5, s0, s1);
+ __ Fmax(s6, s0, s1);
+ __ Fmin(s7, s0, s1);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_FP32(expected, s2);
+ ASSERT_EQUAL_FP32(expected, s3);
+ ASSERT_EQUAL_FP32(expected, s4);
+ ASSERT_EQUAL_FP32(expected, s5);
+ ASSERT_EQUAL_FP32(expected, s6);
+ ASSERT_EQUAL_FP32(expected, s7);
+
+ TEARDOWN();
+}
+
+
+TEST(process_nans_float) {
+ INIT_V8();
+ // Make sure that NaN propagation works correctly.
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+
+ // The input NaNs after passing through ProcessNaN.
+ float sn_proc = rawbits_to_float(0x7fd51111);
+ float sm_proc = rawbits_to_float(0x7fd52222);
+ float qn_proc = qn;
+ float qm_proc = qm;
+ ASSERT(IsQuietNaN(sn_proc));
+ ASSERT(IsQuietNaN(sm_proc));
+ ASSERT(IsQuietNaN(qn_proc));
+ ASSERT(IsQuietNaN(qm_proc));
+
+ // Quiet NaNs are propagated.
+ ProcessNaNsHelper(qn, 0, qn_proc);
+ ProcessNaNsHelper(0, qm, qm_proc);
+ ProcessNaNsHelper(qn, qm, qn_proc);
+
+ // Signalling NaNs are propagated, and made quiet.
+ ProcessNaNsHelper(sn, 0, sn_proc);
+ ProcessNaNsHelper(0, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+
+ // Signalling NaNs take precedence over quiet NaNs.
+ ProcessNaNsHelper(sn, qm, sn_proc);
+ ProcessNaNsHelper(qn, sm, sm_proc);
+ ProcessNaNsHelper(sn, sm, sn_proc);
+}
+
+
+static void DefaultNaNHelper(float n, float m, float a) {
+ ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
+
+ bool test_1op = std::isnan(n);
+ bool test_2op = std::isnan(n) || std::isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(s0, n);
+ __ Fmov(s1, m);
+ __ Fmov(s2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(s10, s0);
+ __ Fabs(s11, s0);
+ __ Fneg(s12, s0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(s13, s0);
+ __ Frinta(s14, s0);
+ __ Frintn(s15, s0);
+ __ Frintz(s16, s0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(d17, s0);
+ }
+
+ if (test_2op) {
+ __ Fadd(s18, s0, s1);
+ __ Fsub(s19, s0, s1);
+ __ Fmul(s20, s0, s1);
+ __ Fdiv(s21, s0, s1);
+ __ Fmax(s22, s0, s1);
+ __ Fmin(s23, s0, s1);
+ }
+
+ __ Fmadd(s24, s0, s1, s2);
+ __ Fmsub(s25, s0, s1, s2);
+ __ Fnmadd(s26, s0, s1, s2);
+ __ Fnmsub(s27, s0, s1, s2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint32_t n_raw = float_to_rawbits(n);
+ ASSERT_EQUAL_FP32(n, s10);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
+ ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
+ }
+
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_float) {
+ INIT_V8();
+ float sn = rawbits_to_float(0x7f951111);
+ float sm = rawbits_to_float(0x7f952222);
+ float sa = rawbits_to_float(0x7f95aaaa);
+ float qn = rawbits_to_float(0x7fea1111);
+ float qm = rawbits_to_float(0x7fea2222);
+ float qa = rawbits_to_float(0x7feaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, sm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, 0.0f);
+ DefaultNaNHelper(0.0f, sm, sa);
+ DefaultNaNHelper(sn, 0.0f, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0f, 0.0f);
+ DefaultNaNHelper(0.0f, qm, 0.0f);
+ DefaultNaNHelper(0.0f, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, 0.0f);
+ DefaultNaNHelper(0.0f, qm, qa);
+ DefaultNaNHelper(qn, 0.0f, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
+static void DefaultNaNHelper(double n, double m, double a) {
+ ASSERT(std::isnan(n) || std::isnan(m) || std::isnan(a));
+
+ bool test_1op = std::isnan(n);
+ bool test_2op = std::isnan(n) || std::isnan(m);
+
+ SETUP();
+ START();
+
+ // Enable Default-NaN mode in the FPCR.
+ __ Mrs(x0, FPCR);
+ __ Orr(x1, x0, DN_mask);
+ __ Msr(FPCR, x1);
+
+ // Execute a number of instructions which all use ProcessNaNs, and check that
+ // they all produce the default NaN.
+ __ Fmov(d0, n);
+ __ Fmov(d1, m);
+ __ Fmov(d2, a);
+
+ if (test_1op) {
+ // Operations that always propagate NaNs unchanged, even signalling NaNs.
+ __ Fmov(d10, d0);
+ __ Fabs(d11, d0);
+ __ Fneg(d12, d0);
+
+ // Operations that use ProcessNaN.
+ __ Fsqrt(d13, d0);
+ __ Frinta(d14, d0);
+ __ Frintn(d15, d0);
+ __ Frintz(d16, d0);
+
+ // Fcvt usually has special NaN handling, but it respects default-NaN mode.
+ __ Fcvt(s17, d0);
+ }
+
+ if (test_2op) {
+ __ Fadd(d18, d0, d1);
+ __ Fsub(d19, d0, d1);
+ __ Fmul(d20, d0, d1);
+ __ Fdiv(d21, d0, d1);
+ __ Fmax(d22, d0, d1);
+ __ Fmin(d23, d0, d1);
+ }
+
+ __ Fmadd(d24, d0, d1, d2);
+ __ Fmsub(d25, d0, d1, d2);
+ __ Fnmadd(d26, d0, d1, d2);
+ __ Fnmsub(d27, d0, d1, d2);
+
+ // Restore FPCR.
+ __ Msr(FPCR, x0);
+
+ END();
+ RUN();
+
+ if (test_1op) {
+ uint64_t n_raw = double_to_rawbits(n);
+ ASSERT_EQUAL_FP64(n, d10);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
+ ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
+ ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
+ }
+
+ if (test_2op) {
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
+ }
+
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
+ ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
+
+ TEARDOWN();
+}
+
+
+TEST(default_nan_double) {
+ INIT_V8();
+ double sn = rawbits_to_double(0x7ff5555511111111);
+ double sm = rawbits_to_double(0x7ff5555522222222);
+ double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
+ double qn = rawbits_to_double(0x7ffaaaaa11111111);
+ double qm = rawbits_to_double(0x7ffaaaaa22222222);
+ double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
+ ASSERT(IsSignallingNaN(sn));
+ ASSERT(IsSignallingNaN(sm));
+ ASSERT(IsSignallingNaN(sa));
+ ASSERT(IsQuietNaN(qn));
+ ASSERT(IsQuietNaN(qm));
+ ASSERT(IsQuietNaN(qa));
+
+ // - Signalling NaNs
+ DefaultNaNHelper(sn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, sm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, sa);
+ DefaultNaNHelper(sn, sm, 0.0);
+ DefaultNaNHelper(0.0, sm, sa);
+ DefaultNaNHelper(sn, 0.0, sa);
+ DefaultNaNHelper(sn, sm, sa);
+ // - Quiet NaNs
+ DefaultNaNHelper(qn, 0.0, 0.0);
+ DefaultNaNHelper(0.0, qm, 0.0);
+ DefaultNaNHelper(0.0, 0.0, qa);
+ DefaultNaNHelper(qn, qm, 0.0);
+ DefaultNaNHelper(0.0, qm, qa);
+ DefaultNaNHelper(qn, 0.0, qa);
+ DefaultNaNHelper(qn, qm, qa);
+ // - Mixed NaNs
+ DefaultNaNHelper(qn, sm, sa);
+ DefaultNaNHelper(sn, qm, sa);
+ DefaultNaNHelper(sn, sm, qa);
+ DefaultNaNHelper(qn, qm, sa);
+ DefaultNaNHelper(sn, qm, qa);
+ DefaultNaNHelper(qn, sm, qa);
+ DefaultNaNHelper(qn, qm, qa);
+}
+
+
+TEST(call_no_relocation) {
+ Address call_start;
+ Address return_address;
+
+ INIT_V8();
+ SETUP();
+
+ START();
+
+ Label function;
+ Label test;
+
+ __ B(&test);
+
+ __ Bind(&function);
+ __ Mov(x0, 0x1);
+ __ Ret();
+
+ __ Bind(&test);
+ __ Mov(x0, 0x0);
+ __ Push(lr, xzr);
+ {
+ Assembler::BlockConstPoolScope scope(&masm);
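+ // The scope keeps the assembler from emitting a constant pool inside the call sequence, so the recorded offsets stay valid.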
+ call_start = buf + __ pc_offset();
+ __ Call(buf + function.pos(), RelocInfo::NONE64);
+ return_address = buf + __ pc_offset();
+ }
+ __ Pop(xzr, lr);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(1, x0);
+
+ // The return_address_from_call_start function doesn't currently encounter any
+ // non-relocatable sequences, so we check it here to make sure it works.
+ // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
+ // non-relocatable calls at all.
+ CHECK(return_address ==
+ Assembler::return_address_from_call_start(call_start));
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperX(int64_t value) {
+ int64_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, value);
+
+ if (value != kXMinInt) {
+ expected = labs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, &fail);
+ __ Abs(x12, x1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(x13, x1, NULL, &done);
+ } else {
+ // labs is undefined for kXMinInt, but our implementation in the
+ // MacroAssembler will return kXMinInt in such a case.
+ expected = kXMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(x10, x1);
+ __ Abs(x11, x1, NULL, &fail);
+ __ Abs(x12, x1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(x13, x1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(x0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_64(0, x0);
+ ASSERT_EQUAL_64(value, x1);
+ ASSERT_EQUAL_64(expected, x10);
+ ASSERT_EQUAL_64(expected, x11);
+ ASSERT_EQUAL_64(expected, x12);
+ ASSERT_EQUAL_64(expected, x13);
+
+ TEARDOWN();
+}
+
+
+static void AbsHelperW(int32_t value) {
+ int32_t expected;
+
+ SETUP();
+ START();
+
+ Label fail;
+ Label done;
+
+ __ Mov(w0, 0);
+ // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
+ // Once it is fixed, we should remove the cast.
+ __ Mov(w1, static_cast<uint32_t>(value));
+
+ if (value != kWMinInt) {
+ expected = abs(value);
+
+ Label next;
+ // The result is representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, &fail);
+ __ Abs(w12, w1, &fail, &next);
+ __ Bind(&next);
+ __ Abs(w13, w1, NULL, &done);
+ } else {
+ // abs is undefined for kWMinInt, but our implementation in the
+ // MacroAssembler will return kWMinInt in such a case.
+ expected = kWMinInt;
+
+ Label next;
+ // The result is not representable.
+ __ Abs(w10, w1);
+ __ Abs(w11, w1, NULL, &fail);
+ __ Abs(w12, w1, &next, &fail);
+ __ Bind(&next);
+ __ Abs(w13, w1, &done);
+ }
+
+ __ Bind(&fail);
+ __ Mov(w0, -1);
+
+ __ Bind(&done);
+
+ END();
+ RUN();
+
+ ASSERT_EQUAL_32(0, w0);
+ ASSERT_EQUAL_32(value, w1);
+ ASSERT_EQUAL_32(expected, w10);
+ ASSERT_EQUAL_32(expected, w11);
+ ASSERT_EQUAL_32(expected, w12);
+ ASSERT_EQUAL_32(expected, w13);
+
+ TEARDOWN();
+}
+
+
+TEST(abs) {
+ INIT_V8();
+ AbsHelperX(0);
+ AbsHelperX(42);
+ AbsHelperX(-42);
+ AbsHelperX(kXMinInt);
+ AbsHelperX(kXMaxInt);
+
+ AbsHelperW(0);
+ AbsHelperW(42);
+ AbsHelperW(-42);
+ AbsHelperW(kWMinInt);
+ AbsHelperW(kWMaxInt);
+}
+
+
+TEST(pool_size) {
+ INIT_V8();
+ SETUP();
+
+ // This test does not execute any code. It only tests that the size of the
+ // pools is read correctly from the RelocInfo.
+
+ Label exit;
+ __ b(&exit);
+
+ const unsigned constant_pool_size = 312;
+ const unsigned veneer_pool_size = 184;
+
+ __ RecordConstPool(constant_pool_size);
+ for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
+ __ dc32(0);
+ }
+
+ __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
+ for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
+ __ nop();
+ }
+
+ __ bind(&exit);
+
+ Heap* heap = isolate->heap();
+ CodeDesc desc;
+ Object* code_object = NULL;
+ Code* code;
+ masm.GetCode(&desc);
+ MaybeObject* maybe_code = heap->CreateCode(desc, 0, masm.CodeObject());
+ maybe_code->ToObject(&code_object);
+ code = Code::cast(code_object);
+
+ unsigned pool_count = 0;
+ int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
+ RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ for (RelocIterator it(code, pool_mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (RelocInfo::IsConstPool(info->rmode())) {
+ ASSERT(info->data() == constant_pool_size);
+ ++pool_count;
+ }
+ if (RelocInfo::IsVeneerPool(info->rmode())) {
+ ASSERT(info->data() == veneer_pool_size);
+ ++pool_count;
+ }
+ }
+
+ ASSERT(pool_count == 2);
+
+ TEARDOWN();
+}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 4fa5ffecb..446cec6ad 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -99,15 +99,15 @@ TEST(AssemblerX64StackOperations) {
// Assemble a simple function that copies argument 2 and returns it.
// We compile without stack frame pointers, so the gdb debugger shows
// incorrect stack frames when debugging this function (which has them).
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
- __ push(arg2); // Value at (rbp - 8)
- __ push(arg2); // Value at (rbp - 16)
- __ push(arg1); // Value at (rbp - 24)
- __ pop(rax);
- __ pop(rax);
- __ pop(rax);
- __ pop(rbp);
+ __ pushq(arg2); // Value at (rbp - 8)
+ __ pushq(arg2); // Value at (rbp - 16)
+ __ pushq(arg1); // Value at (rbp - 24)
+ __ popq(rax);
+ __ popq(rax);
+ __ popq(rax);
+ __ popq(rbp);
__ nop();
__ ret(0);
@@ -153,7 +153,7 @@ TEST(AssemblerX64ImulOperation) {
// Assemble a simple function that multiplies arguments returning the high
// word.
__ movq(rax, arg2);
- __ imul(arg1);
+ __ imulq(arg1);
__ movq(rax, rdx);
__ ret(0);
@@ -330,19 +330,19 @@ TEST(AssemblerX64MemoryOperands) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 2 and returns it.
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
- __ push(arg2); // Value at (rbp - 8)
- __ push(arg2); // Value at (rbp - 16)
- __ push(arg1); // Value at (rbp - 24)
+ __ pushq(arg2); // Value at (rbp - 8)
+ __ pushq(arg2); // Value at (rbp - 16)
+ __ pushq(arg1); // Value at (rbp - 24)
const int kStackElementSize = 8;
__ movq(rax, Operand(rbp, -3 * kStackElementSize));
- __ pop(arg2);
- __ pop(arg2);
- __ pop(arg2);
- __ pop(rbp);
+ __ popq(arg2);
+ __ popq(arg2);
+ __ popq(arg2);
+ __ popq(rbp);
__ nop();
__ ret(0);
@@ -364,7 +364,7 @@ TEST(AssemblerX64ControlFlow) {
Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
// Assemble a simple function that copies argument 1 and returns it.
- __ push(rbp);
+ __ pushq(rbp);
__ movq(rbp, rsp);
__ movq(rax, arg1);
@@ -372,7 +372,7 @@ TEST(AssemblerX64ControlFlow) {
__ jmp(&target);
__ movq(rax, arg2);
__ bind(&target);
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
CodeDesc desc;
@@ -496,11 +496,11 @@ TEST(AssemblerMultiByteNop) {
byte buffer[1024];
Isolate* isolate = CcTest::i_isolate();
Assembler assm(isolate, buffer, sizeof(buffer));
- __ push(rbx);
- __ push(rcx);
- __ push(rdx);
- __ push(rdi);
- __ push(rsi);
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ pushq(rdx);
+ __ pushq(rdi);
+ __ pushq(rsi);
__ movq(rax, Immediate(1));
__ movq(rbx, Immediate(2));
__ movq(rcx, Immediate(3));
@@ -527,19 +527,19 @@ TEST(AssemblerMultiByteNop) {
__ cmpq(rsi, Immediate(6));
__ j(not_equal, &fail);
__ movq(rax, Immediate(42));
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rsi);
+ __ popq(rdi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
__ bind(&fail);
__ movq(rax, Immediate(13));
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rsi);
+ __ popq(rdi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
CodeDesc desc;
@@ -571,14 +571,14 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Assembler assm(isolate, buffer, sizeof(buffer));
// Remove return address from the stack to fix stack frame alignment.
- __ pop(rcx);
+ __ popq(rcx);
// Store input vector on the stack.
for (int i = 0; i < ELEMENT_COUNT; i++) {
__ movl(rax, Immediate(vec->Get(i)->Int32Value()));
__ shl(rax, Immediate(0x20));
- __ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
- __ push(rax);
+ __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ pushq(rax);
}
// Read vector into a xmm register.
@@ -590,7 +590,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Remove unused data from the stack.
__ addq(rsp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
// Restore return address.
- __ push(rcx);
+ __ pushq(rcx);
__ ret(0);
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
new file mode 100644
index 000000000..eba956c85
--- /dev/null
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -0,0 +1,276 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "atomicops.h"
+
+using namespace v8::internal;
+
+
+#define CHECK_EQU(v1, v2) \
+ CHECK_EQ(static_cast<int64_t>(v1), static_cast<int64_t>(v2))
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+ // For now, we just test the single-threaded execution.
+
+ // Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
+ // go outside the expected address bounds. This is to test that the
+ // 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
+ // machines.
+ struct {
+ AtomicType prev_word;
+ AtomicType count;
+ AtomicType next_word;
+ } s;
+
+ AtomicType prev_word_value, next_word_value;
+ memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+ memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+ s.prev_word = prev_word_value;
+ s.count = 0;
+ s.next_word = next_word_value;
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 1), 1);
+ CHECK_EQU(s.count, 1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 2), 3);
+ CHECK_EQU(s.count, 3);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 3), 6);
+ CHECK_EQU(s.count, 6);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -3), 3);
+ CHECK_EQU(s.count, 3);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -2), 1);
+ CHECK_EQU(s.count, 1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), 0);
+ CHECK_EQU(s.count, 0);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), -1);
+ CHECK_EQU(s.count, -1);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -4), -5);
+ CHECK_EQU(s.count, -5);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+
+ CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 5), 0);
+ CHECK_EQU(s.count, 0);
+ CHECK_EQU(s.prev_word, prev_word_value);
+ CHECK_EQU(s.next_word, next_word_value);
+}
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+ AtomicType value = 0;
+ AtomicType prev = NoBarrier_CompareAndSwap(&value, 0, 1);
+ CHECK_EQU(1, value);
+ CHECK_EQU(0, prev);
+
+ // Use a test value that has non-zero bits in both halves, for testing
+ // the 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val =
+ (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ prev = NoBarrier_CompareAndSwap(&value, 0, 5);
+ CHECK_EQU(k_test_val, value);
+ CHECK_EQU(k_test_val, prev);
+
+ value = k_test_val;
+ prev = NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ CHECK_EQU(5, value);
+ CHECK_EQU(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+ AtomicType value = 0;
+ AtomicType new_value = NoBarrier_AtomicExchange(&value, 1);
+ CHECK_EQU(1, value);
+ CHECK_EQU(0, new_value);
+
+ // Use a test value that has non-zero bits in both halves, for testing
+ // the 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val =
+ (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ new_value = NoBarrier_AtomicExchange(&value, k_test_val);
+ CHECK_EQU(k_test_val, value);
+ CHECK_EQU(k_test_val, new_value);
+
+ value = k_test_val;
+ new_value = NoBarrier_AtomicExchange(&value, 5);
+ CHECK_EQU(5, value);
+ CHECK_EQU(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+ // Test at rollover boundary between int_max and int_min.
+ AtomicType test_val =
+ static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 1);
+ AtomicType value = -1 ^ test_val;
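+ // -1 ^ test_val gives the maximum positive value representable in AtomicType.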
+ AtomicType new_value = NoBarrier_AtomicIncrement(&value, 1);
+ CHECK_EQU(test_val, value);
+ CHECK_EQU(value, new_value);
+
+ NoBarrier_AtomicIncrement(&value, -1);
+ CHECK_EQU(-1 ^ test_val, value);
+
+ // Test at 32-bit boundary for 64-bit atomic type.
+ test_val = static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) / 2);
+ value = test_val - 1;
+ new_value = NoBarrier_AtomicIncrement(&value, 1);
+ CHECK_EQU(test_val, value);
+ CHECK_EQU(value, new_value);
+
+ NoBarrier_AtomicIncrement(&value, -1);
+ CHECK_EQU(test_val - 1, value);
+}
+
+
+// Return an AtomicType with the value 0xa5a5a5...
+template <class AtomicType>
+static AtomicType TestFillValue() {
+ AtomicType val = 0;
+ memset(&val, 0xa5, sizeof(AtomicType));
+ return val;
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestStore() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ NoBarrier_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ NoBarrier_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+
+ Acquire_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ Acquire_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+
+ Release_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ Release_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestLoad() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ value = kVal1;
+ CHECK_EQU(kVal1, NoBarrier_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, NoBarrier_Load(&value));
+
+ value = kVal1;
+ CHECK_EQU(kVal1, Acquire_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, Acquire_Load(&value));
+
+ value = kVal1;
+ CHECK_EQU(kVal1, Release_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, Release_Load(&value));
+}
+
+
+TEST(AtomicIncrement) {
+ TestAtomicIncrement<Atomic32>();
+ TestAtomicIncrement<AtomicWord>();
+}
+
+
+TEST(CompareAndSwap) {
+ TestCompareAndSwap<Atomic32>();
+ TestCompareAndSwap<AtomicWord>();
+}
+
+
+TEST(AtomicExchange) {
+ TestAtomicExchange<Atomic32>();
+ TestAtomicExchange<AtomicWord>();
+}
+
+
+TEST(AtomicIncrementBounds) {
+ TestAtomicIncrementBounds<Atomic32>();
+ TestAtomicIncrementBounds<AtomicWord>();
+}
+
+
+TEST(Store) {
+ TestStore<Atomic32>();
+ TestStore<AtomicWord>();
+}
+
+
+TEST(Load) {
+ TestLoad<Atomic32>();
+ TestLoad<AtomicWord>();
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
new file mode 100644
index 000000000..7ddefdde1
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "code-stubs.h"
+#include "test-code-stubs.h"
+#include "factory.h"
+#include "macro-assembler.h"
+#include "platform.h"
+#include "simulator.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+ Register source_reg,
+ Register destination_reg,
+ bool inline_fastpath) {
+ // Allocate an executable page of memory.
+ size_t actual_size = 2 * Assembler::kMinimalBufferSize;
+ byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ DoubleToIStub stub(source_reg, destination_reg, 0, true, inline_fastpath);
+
+ byte* start = stub.GetCode(isolate)->instruction_start();
+ Label done;
+
+ __ SetStackPointer(csp);
+ __ PushCalleeSavedRegisters();
+ __ Mov(jssp, csp);
+ __ SetStackPointer(jssp);
+
+ // Push the double argument.
+ __ Push(d0);
+ __ Mov(source_reg, jssp);
+
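+ // The queue collects the registers to be saved so that PushQueued() below
+ // can emit them together as one batch of pushes.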
+ MacroAssembler::PushPopQueue queue(&masm);
+
+ // Save registers to make sure they don't get clobbered.
+ int source_reg_offset = kDoubleSize;
+ int reg_num = 0;
+ for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ queue.Queue(reg);
+ source_reg_offset += kPointerSize;
+ }
+ }
+ // Re-push the double argument.
+ queue.Queue(d0);
+
+ queue.PushQueued();
+
+ // Call through to the actual stub.
+ if (inline_fastpath) {
+ __ Ldr(d0, MemOperand(source_reg));
+ __ TryConvertDoubleToInt64(destination_reg, d0, &done);
+ if (destination_reg.is(source_reg)) {
+ // Restore clobbered source_reg.
+ __ add(source_reg, jssp, Operand(source_reg_offset));
+ }
+ }
+ __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+ __ bind(&done);
+
+ __ Drop(1, kDoubleSize);
+
+ // Make sure no registers have been unexpectedly clobbered.
+ for (--reg_num; reg_num >= 0; --reg_num) {
+ Register reg = Register::from_code(reg_num);
+ if (!reg.is(destination_reg)) {
+ __ Pop(ip0);
+ __ cmp(reg, ip0);
+ __ Assert(eq, kRegisterWasClobbered);
+ }
+ }
+
+ __ Drop(1, kDoubleSize);
+
+ if (!destination_reg.is(x0))
+ __ Mov(x0, destination_reg);
+
+ // Restore callee save registers.
+ __ Mov(csp, jssp);
+ __ SetStackPointer(csp);
+ __ PopCalleeSavedRegisters();
+
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ CPU::FlushICache(buffer, actual_size);
+ return (reinterpret_cast<ConvertDToIFunc>(
+ reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+ double from) {
+#ifdef USE_SIMULATOR
+ Simulator::CallArgument args[] = {
+ Simulator::CallArgument(from),
+ Simulator::CallArgument::End()
+ };
+ return Simulator::current(Isolate::Current())->CallInt64(
+ FUNCTION_ADDR(func), args);
+#else
+ return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ HandleScope scope(isolate);
+
+#if DEBUG
+ // Verify that the tests actually work with the C version. In the release
+ // code, the compiler optimizes it away because it's all constant, but gets it
+ // wrong, triggering an assert on gcc.
+ RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+ Register source_registers[] = {jssp, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
+ x10, x11, x12, x13, x14, x15, x18, x19, x20,
+ x21, x22, x23, x24};
+ Register dest_registers[] = {x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11,
+ x12, x13, x14, x15, x18, x19, x20, x21, x22, x23,
+ x24};
+
+ for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+ for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ false));
+ RunAllTruncationTests(
+ RunGeneratedCodeCallWrapper,
+ MakeConvertDToIFuncTrampoline(isolate,
+ source_registers[s],
+ dest_registers[d],
+ true));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 6b3a12ccc..348b21aca 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -57,11 +57,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
DoubleToIStub stub(source_reg, destination_reg, offset, true);
byte* start = stub.GetCode(isolate)->instruction_start();
- __ push(rbx);
- __ push(rcx);
- __ push(rdx);
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ pushq(rdx);
+ __ pushq(rsi);
+ __ pushq(rdi);
if (!source_reg.is(rsp)) {
// The argument we pass to the stub is not a heap number, but instead
@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// registers.
int double_argument_slot =
(Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
- __ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset));
+ __ leaq(source_reg, MemOperand(rsp, -double_argument_slot - offset));
}
// Save registers make sure they don't get clobbered.
@@ -78,7 +78,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
- __ push(reg);
+ __ pushq(reg);
}
}
@@ -103,11 +103,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ movq(rax, destination_reg);
- __ pop(rdi);
- __ pop(rsi);
- __ pop(rdx);
- __ pop(rcx);
- __ pop(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
+ __ popq(rdx);
+ __ popq(rcx);
+ __ popq(rbx);
__ ret(0);
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index db00e9ac5..999febf77 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -49,6 +49,9 @@ int STDCALL ConvertDToICVersion(double d) {
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
HeapNumber::kExponentBias);
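+  // A negative unbiased exponent means |d| < 1.0, so the truncated result is
+  // 0; returning early also avoids casting a negative exponent to unsigned
+  // below.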
+ if (exponent < 0) {
+ return 0;
+ }
uint32_t unsigned_exponent = static_cast<uint32_t>(exponent);
int result = 0;
uint32_t max_exponent =
@@ -113,10 +116,27 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(Infinity, 0);
RunOneTruncationTest(-NaN, 0);
RunOneTruncationTest(-Infinity, 0);
-
- RunOneTruncationTest(4.5036e+15, 0x1635E000);
+ RunOneTruncationTest(4.94065645841e-324, 0);
+ RunOneTruncationTest(-4.94065645841e-324, 0);
+
+ RunOneTruncationTest(0.9999999999999999, 0);
+ RunOneTruncationTest(-0.9999999999999999, 0);
+ RunOneTruncationTest(4294967296.0, 0);
+ RunOneTruncationTest(-4294967296.0, 0);
+ RunOneTruncationTest(9223372036854775000.0, 4294966272.0);
+ RunOneTruncationTest(-9223372036854775000.0, -4294966272.0);
+ RunOneTruncationTest(4.5036e+15, 372629504);
RunOneTruncationTest(-4.5036e+15, -372629504);
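+  // (4.5036e+15 is exactly 2^52 + 372629504, so keeping the low 32 bits of
+  // the truncated value gives +/-372629504, i.e. +/-0x1635e000.)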
+ RunOneTruncationTest(287524199.5377777, 0x11234567);
+ RunOneTruncationTest(-287524199.5377777, -0x11234567);
+ RunOneTruncationTest(2300193596.302222, 2300193596.0);
+ RunOneTruncationTest(-2300193596.302222, -2300193596.0);
+ RunOneTruncationTest(4600387192.604444, 305419896);
+ RunOneTruncationTest(-4600387192.604444, -305419896);
+ RunOneTruncationTest(4823855600872397.0, 1737075661);
+ RunOneTruncationTest(-4823855600872397.0, -1737075661);
+
RunOneTruncationTest(4503603922337791.0, -1);
RunOneTruncationTest(-4503603922337791.0, 1);
RunOneTruncationTest(4503601774854143.0, 2147483647);
@@ -134,10 +154,19 @@ void RunAllTruncationTests(ConvertDToICallWrapper callWrapper,
RunOneTruncationTest(4.8357078901445341e+24, -1073741824);
RunOneTruncationTest(-4.8357078901445341e+24, 1073741824);
+ RunOneTruncationTest(2147483647.0, 2147483647.0);
+ RunOneTruncationTest(-2147483648.0, -2147483648.0);
RunOneTruncationTest(9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(-9.6714111686030497e+24, -2147483648.0);
RunOneTruncationTest(9.6714157802890681e+24, -2147483648.0);
RunOneTruncationTest(-9.6714157802890681e+24, -2147483648.0);
+ RunOneTruncationTest(1.9342813113834065e+25, 2147483648.0);
+ RunOneTruncationTest(-1.9342813113834065e+25, 2147483648.0);
+
+ RunOneTruncationTest(3.868562622766813e+25, 0);
+ RunOneTruncationTest(-3.868562622766813e+25, 0);
+ RunOneTruncationTest(1.7976931348623157e+308, 0);
+ RunOneTruncationTest(-1.7976931348623157e+308, 0);
}
#undef NaN
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index ae414d784..6540c5d28 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -51,7 +51,7 @@ static void SetGlobalProperty(const char* name, Object* value) {
isolate->factory()->InternalizeUtf8String(name);
Handle<JSObject> global(isolate->context()->global_object());
Runtime::SetObjectProperty(isolate, global, internalized_name, object, NONE,
- kNonStrictMode);
+ SLOPPY);
}
@@ -66,8 +66,7 @@ static Handle<JSFunction> Compile(const char* source) {
0,
false,
Handle<Context>(isolate->native_context()),
- NULL, NULL,
- Handle<String>::null(),
+ NULL, NULL, NO_CACHED_DATA,
NOT_NATIVES_CODE);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_function, isolate->native_context());
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 9f2436c03..e16e45a57 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -11,6 +11,15 @@
using namespace v8::internal;
+Code* DummyCode(LocalContext* context) {
+ CompileRun("function foo() {};");
+ i::Handle<i::JSFunction> fun = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(
+ (*context)->Global()->Get(v8_str("foo"))));
+ return fun->code();
+}
+
+
TEST(ConstantPool) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -19,32 +28,41 @@ TEST(ConstantPool) {
v8::HandleScope scope(context->GetIsolate());
// Check construction.
- Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(3, 2, 1);
+ Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(3, 1, 2, 1);
CHECK_EQ(array->count_of_int64_entries(), 3);
- CHECK_EQ(array->count_of_ptr_entries(), 2);
+ CHECK_EQ(array->count_of_code_ptr_entries(), 1);
+ CHECK_EQ(array->count_of_heap_ptr_entries(), 2);
CHECK_EQ(array->count_of_int32_entries(), 1);
- CHECK_EQ(array->length(), 6);
+ CHECK_EQ(array->length(), 7);
CHECK_EQ(array->first_int64_index(), 0);
- CHECK_EQ(array->first_ptr_index(), 3);
- CHECK_EQ(array->first_int32_index(), 5);
+ CHECK_EQ(array->first_code_ptr_index(), 3);
+ CHECK_EQ(array->first_heap_ptr_index(), 4);
+ CHECK_EQ(array->first_int32_index(), 6);
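+  // I.e. entries are laid out by type: int64 at 0..2, the code pointer at 3,
+  // heap pointers at 4..5 and the int32 entry at 6.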
// Check getters and setters.
int64_t big_number = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
Handle<Object> object = factory->NewHeapNumber(4.0);
+ Code* code = DummyCode(&context);
array->set(0, big_number);
array->set(1, 0.5);
- array->set(3, *object);
- array->set(5, 50);
+ array->set(2, 3e-24);
+ array->set(3, code->entry());
+ array->set(4, code);
+ array->set(5, *object);
+ array->set(6, 50);
CHECK_EQ(array->get_int64_entry(0), big_number);
CHECK_EQ(array->get_int64_entry_as_double(1), 0.5);
- CHECK_EQ(array->get_ptr_entry(3), *object);
- CHECK_EQ(array->get_int32_entry(5), 50);
+ CHECK_EQ(array->get_int64_entry_as_double(2), 3e-24);
+ CHECK_EQ(array->get_code_ptr_entry(3), code->entry());
+ CHECK_EQ(array->get_heap_ptr_entry(4), code);
+ CHECK_EQ(array->get_heap_ptr_entry(5), *object);
+ CHECK_EQ(array->get_int32_entry(6), 50);
// Check pointers are updated on GC.
- Object* old_ptr = array->get_ptr_entry(3);
+ Object* old_ptr = array->get_heap_ptr_entry(5);
CHECK_EQ(*object, old_ptr);
heap->CollectGarbage(NEW_SPACE);
- Object* new_ptr = array->get_ptr_entry(3);
+ Object* new_ptr = array->get_heap_ptr_entry(5);
CHECK_NE(*object, old_ptr);
CHECK_EQ(*object, new_ptr);
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 3bba51439..ed0b190f9 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -1495,20 +1495,16 @@ TEST(FunctionDetails) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Handle<v8::Script> script_a = v8::Script::Compile(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
+ v8::Handle<v8::Script> script_a = CompileWithOrigin(
" function foo\n() { try { bar(); } catch(e) {} }\n"
- " function bar() { startProfiling(); }\n"),
- v8::String::NewFromUtf8(env->GetIsolate(), "script_a"));
+ " function bar() { startProfiling(); }\n",
+ "script_a");
script_a->Run();
- v8::Handle<v8::Script> script_b = v8::Script::Compile(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
+ v8::Handle<v8::Script> script_b = CompileWithOrigin(
"\n\n function baz() { try { foo(); } catch(e) {} }\n"
"\n\nbaz();\n"
- "stopProfiling();\n"),
- v8::String::NewFromUtf8(env->GetIsolate(), "script_b"));
+ "stopProfiling();\n",
+ "script_b");
script_b->Run();
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 460c07e5a..5190729fa 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -167,3 +167,25 @@ TEST(DaylightSavingsTime) {
CheckDST(august_20 + 2 * 3600 - 1000);
CheckDST(august_20);
}
+
+
+TEST(DateCacheVersion) {
+ FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::Handle<v8::Array> date_cache_version =
+ v8::Handle<v8::Array>::Cast(CompileRun("%DateCacheVersion()"));
+
+ CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
+ CHECK(date_cache_version->Get(0)->IsNumber());
+ CHECK_EQ(0.0, date_cache_version->Get(0)->NumberValue());
+
+ v8::Date::DateTimeConfigurationChangeNotification(isolate);
+
+ CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
+ CHECK(date_cache_version->Get(0)->IsNumber());
+ CHECK_EQ(1.0, date_cache_version->Get(0)->NumberValue());
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 67ef88516..b51cb7724 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -114,7 +114,7 @@ class DebugLocalContext {
v8::internal::Runtime::SetObjectProperty(isolate, global, debug_string,
Handle<Object>(debug->debug_context()->global_proxy(), isolate),
DONT_ENUM,
- ::v8::internal::kNonStrictMode);
+ ::v8::internal::SLOPPY);
}
private:
@@ -581,24 +581,6 @@ const char* frame_script_name_source =
v8::Local<v8::Function> frame_script_name;
-// Source for the JavaScript function which picks out the script data for the
-// top frame.
-const char* frame_script_data_source =
- "function frame_script_data(exec_state) {"
- " return exec_state.frame(0).func().script().data();"
- "}";
-v8::Local<v8::Function> frame_script_data;
-
-
-// Source for the JavaScript function which picks out the script data from
-// AfterCompile event
-const char* compiled_script_data_source =
- "function compiled_script_data(event_data) {"
- " return event_data.script().data();"
- "}";
-v8::Local<v8::Function> compiled_script_data;
-
-
// Source for the JavaScript function which returns the number of frames.
static const char* frame_count_source =
"function frame_count(exec_state) {"
@@ -610,10 +592,8 @@ v8::Handle<v8::Function> frame_count;
// Global variable to store the last function hit - used by some tests.
char last_function_hit[80];
-// Global variable to store the name and data for last script hit - used by some
-// tests.
+// Global variable to store the name for last script hit - used by some tests.
char last_script_name_hit[80];
-char last_script_data_hit[80];
// Global variables to store the last source position - used by some tests.
int last_source_line = -1;
@@ -626,7 +606,6 @@ static void DebugEventBreakPointHitCount(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::Handle<v8::Object> event_data = event_details.GetEventData();
v8::internal::Isolate* isolate = CcTest::i_isolate();
Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
@@ -687,40 +666,11 @@ static void DebugEventBreakPointHitCount(
}
}
- if (!frame_script_data.IsEmpty()) {
- // Get the script data of the function script.
- const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = frame_script_data->Call(exec_state,
- argc, argv);
- if (result->IsUndefined()) {
- last_script_data_hit[0] = '\0';
- } else {
- result = result->ToString();
- CHECK(result->IsString());
- v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteUtf8(last_script_data_hit);
- }
- }
-
// Perform a full deoptimization when the specified number of
// breaks have been hit.
if (break_point_hit_count == break_point_hit_count_deoptimize) {
i::Deoptimizer::DeoptimizeAll(isolate);
}
- } else if (event == v8::AfterCompile && !compiled_script_data.IsEmpty()) {
- const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { event_data };
- v8::Handle<v8::Value> result = compiled_script_data->Call(exec_state,
- argc, argv);
- if (result->IsUndefined()) {
- last_script_data_hit[0] = '\0';
- } else {
- result = result->ToString();
- CHECK(result->IsString());
- v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteUtf8(last_script_data_hit);
- }
}
}
@@ -2268,8 +2218,7 @@ TEST(ScriptBreakPointLineTopLevel) {
v8::Local<v8::Function> f;
{
v8::HandleScope scope(env->GetIsolate());
- v8::Script::Compile(
- script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
+ CompileRunWithOrigin(script, "test.html");
}
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
@@ -2285,8 +2234,7 @@ TEST(ScriptBreakPointLineTopLevel) {
// Recompile and run script and check that break point was hit.
break_point_hit_count = 0;
- v8::Script::Compile(
- script, v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))->Run();
+ CompileRunWithOrigin(script, "test.html");
CHECK_EQ(1, break_point_hit_count);
// Call f and check that there are still no break points.
@@ -2321,9 +2269,7 @@ TEST(ScriptBreakPointTopLevelCrash) {
{
v8::HandleScope scope(env->GetIsolate());
break_point_hit_count = 0;
- v8::Script::Compile(script_source,
- v8::String::NewFromUtf8(env->GetIsolate(), "test.html"))
- ->Run();
+ CompileRunWithOrigin(script_source, "test.html");
CHECK_EQ(1, break_point_hit_count);
}
@@ -6249,12 +6195,6 @@ TEST(ScriptNameAndData) {
frame_script_name = CompileFunction(&env,
frame_script_name_source,
"frame_script_name");
- frame_script_data = CompileFunction(&env,
- frame_script_data_source,
- "frame_script_data");
- compiled_script_data = CompileFunction(&env,
- compiled_script_data_source,
- "compiled_script_data");
v8::Debug::SetDebugEventListener2(DebugEventBreakPointHitCount);
@@ -6267,7 +6207,6 @@ TEST(ScriptNameAndData) {
v8::ScriptOrigin origin1 =
v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "name"));
v8::Handle<v8::Script> script1 = v8::Script::Compile(script, &origin1);
- script1->SetData(v8::String::NewFromUtf8(env->GetIsolate(), "data"));
script1->Run();
v8::Local<v8::Function> f;
f = v8::Local<v8::Function>::Cast(
@@ -6276,7 +6215,6 @@ TEST(ScriptNameAndData) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ("name", last_script_name_hit);
- CHECK_EQ("data", last_script_data_hit);
// Compile the same script again without setting data. As the compilation
// cache is disabled when debugging expect the data to be missing.
@@ -6286,7 +6224,6 @@ TEST(ScriptNameAndData) {
f->Call(env->Global(), 0, NULL);
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ("name", last_script_name_hit);
- CHECK_EQ("", last_script_data_hit); // Undefined results in empty string.
v8::Local<v8::String> data_obj_source = v8::String::NewFromUtf8(
env->GetIsolate(),
@@ -6294,29 +6231,23 @@ TEST(ScriptNameAndData) {
" b: 123,\n"
" toString: function() { return this.a + ' ' + this.b; }\n"
"})\n");
- v8::Local<v8::Value> data_obj = v8::Script::Compile(data_obj_source)->Run();
+ v8::Script::Compile(data_obj_source)->Run();
v8::ScriptOrigin origin2 =
v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "new name"));
v8::Handle<v8::Script> script2 = v8::Script::Compile(script, &origin2);
script2->Run();
- script2->SetData(data_obj->ToString());
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ("new name", last_script_name_hit);
- CHECK_EQ("abc 123", last_script_data_hit);
- v8::Handle<v8::Script> script3 = v8::Script::Compile(
- script, &origin2, NULL,
- v8::String::NewFromUtf8(env->GetIsolate(), "in compile"));
- CHECK_EQ("in compile", last_script_data_hit);
+ v8::Handle<v8::Script> script3 = v8::Script::Compile(script, &origin2);
script3->Run();
f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
f->Call(env->Global(), 0, NULL);
CHECK_EQ(4, break_point_hit_count);
- CHECK_EQ("in compile", last_script_data_hit);
}
@@ -7052,7 +6983,7 @@ TEST(Backtrace) {
v8::Handle<v8::String> void0 =
v8::String::NewFromUtf8(env->GetIsolate(), "void(0)");
- v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
+ v8::Handle<v8::Script> script = CompileWithOrigin(void0, void0);
// Check backtrace from "void(0)" script.
BacktraceData::frame_counter = -10;
@@ -7072,18 +7003,20 @@ TEST(Backtrace) {
TEST(GetMirror) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Handle<v8::Value> obj =
- v8::Debug::GetMirror(v8::String::NewFromUtf8(env->GetIsolate(), "hodja"));
- v8::Handle<v8::Function> run_test =
- v8::Handle<v8::Function>::Cast(v8::Script::New(
- v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function runTest(mirror) {"
- " return mirror.isString() && (mirror.length() == 5);"
- "}"
- ""
- "runTest;"))->Run());
+ v8::Debug::GetMirror(v8::String::NewFromUtf8(isolate, "hodja"));
+ v8::ScriptCompiler::Source source(v8_str(
+ "function runTest(mirror) {"
+ " return mirror.isString() && (mirror.length() == 5);"
+ "}"
+ ""
+ "runTest;"));
+ v8::Handle<v8::Function> run_test = v8::Handle<v8::Function>::Cast(
+ v8::ScriptCompiler::CompileUnbound(isolate, &source)
+ ->BindToCurrentContext()
+ ->Run());
v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj);
CHECK(result->IsTrue());
}
@@ -7700,4 +7633,39 @@ TEST(LiveEditDisabled) {
}
+TEST(PrecompiledFunction) {
+ // Regression test for crbug.com/346207. If we have preparse data, parsing the
+ // function in the presence of the debugger (and breakpoints) should still
+ // succeed. The bug was that preparsing was done lazily and parsing was done
+ // eagerly, so the symbol streams didn't match.
+ DebugLocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ env.ExposeDebug();
+ v8::Debug::SetDebugEventListener2(DebugBreakInlineListener);
+
+ v8::Local<v8::Function> break_here =
+ CompileFunction(&env, "function break_here(){}", "break_here");
+ SetBreakPoint(break_here, 0);
+
+ const char* source =
+ "var a = b = c = 1; \n"
+ "function this_is_lazy() { \n"
+ // This symbol won't appear in the preparse data.
+ " var a; \n"
+ "} \n"
+ "function bar() { \n"
+ " return \"bar\"; \n"
+ "}; \n"
+ "a = b = c = 2; \n"
+ "bar(); \n";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("bar", *utf8);
+
+ v8::Debug::SetDebugEventListener2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 1f22c9ff3..d6738a31a 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -557,7 +557,6 @@ class ExistsInPrototypeContext: public DeclarationContext {
TEST(ExistsInPrototype) {
- i::FLAG_es52_globals = true;
HandleScope scope(CcTest::isolate());
// Sanity check to make sure that the holder of the interceptor
@@ -620,7 +619,6 @@ class AbsentInPrototypeContext: public DeclarationContext {
TEST(AbsentInPrototype) {
- i::FLAG_es52_globals = true;
v8::V8::Initialize();
HandleScope scope(CcTest::isolate());
@@ -668,7 +666,6 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
TEST(ExistsInHiddenPrototype) {
- i::FLAG_es52_globals = true;
HandleScope scope(CcTest::isolate());
{ ExistsInHiddenPrototypeContext context;
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 4b69612f5..dbbb3edb0 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -613,7 +613,6 @@ TEST(DeoptimizeLoadICStoreIC) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -695,5 +694,4 @@ TEST(DeoptimizeLoadICStoreICNested) {
CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index cb1b1c798..5eff4206c 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -272,10 +272,10 @@ TEST(Type0) {
// We only disassemble one instruction so the eor instruction is not here.
COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
"1301c234 movwne ip, #4660");
- // Movw can't do setcc so we don't get that here. Mov immediate with setcc
- // is pretty strange anyway.
+ // Movw can't do setcc, so first move to ip, then the following instruction
+ // moves to r5. Mov immediate with setcc is pretty strange anyway.
COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
- "159fc000 ldrne ip, [pc, #+0]");
+ "1301c234 movwne ip, #4660");
// Emit a literal pool now, otherwise this could be dumped later, in the
// middle of a different test.
EMIT_PENDING_LITERALS();
@@ -410,6 +410,8 @@ TEST(Type3) {
"e6843895 pkhbt r3, r4, r5, lsl #17");
COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
"e68438d5 pkhtb r3, r4, r5, asr #17");
+ COMPARE(uxtb(r9, Operand(r10, ROR, 0)),
+ "e6ef907a uxtb r9, r10");
COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
"e6ef3474 uxtb r3, r4, ror #8");
COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
@@ -687,8 +689,10 @@ TEST(Neon) {
"f421420f vld1.8 {d4, d5, d6, d7}, [r1]");
COMPARE(vst1(Neon16, NeonListOperand(d17, 4), NeonMemOperand(r9)),
"f449124f vst1.16 {d17, d18, d19, d20}, [r9]");
+ COMPARE(vmovl(NeonU8, q3, d1),
+ "f3886a11 vmovl.u8 q3, d1");
COMPARE(vmovl(NeonU8, q4, d2),
- "f3884a12 vmovl.u8 q4, d2");
+ "f3888a12 vmovl.u8 q4, d2");
}
VERIFY_RUN();
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
new file mode 100644
index 000000000..3343175e9
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -0,0 +1,1763 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <cstring>
+#include "cctest.h"
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+#include "arm64/utils-arm64.h"
+
+using namespace v8::internal;
+
+#define TEST_(name) TEST(DISASM_##name)
+
+#define EXP_SIZE (256)
+#define INSTR_SIZE (1024)
+#define SET_UP_CLASS(ASMCLASS) \
+ InitializeVM(); \
+ Isolate* isolate = Isolate::Current(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ ASMCLASS* assm = new ASMCLASS(isolate, buf, INSTR_SIZE); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Disassembler* disasm = new Disassembler(); \
+ decoder->AppendVisitor(disasm)
+
+#define SET_UP() SET_UP_CLASS(Assembler)
+
+#define COMPARE(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strcmp(disasm->GetOutput(), EXP) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define COMPARE_PREFIX(ASM, EXP) \
+ assm->Reset(); \
+ assm->ASM; \
+ assm->GetCode(NULL); \
+ decoder->Decode(reinterpret_cast<Instruction*>(buf)); \
+ encoding = *reinterpret_cast<uint32_t*>(buf); \
+ if (strncmp(disasm->GetOutput(), EXP, strlen(EXP)) != 0) { \
+ printf("%u : Encoding: %08" PRIx32 "\nExpected: %s\nFound: %s\n", \
+ __LINE__, encoding, EXP, disasm->GetOutput()); \
+ abort(); \
+ }
+
+#define CLEANUP() \
+ delete disasm; \
+ delete decoder; \
+ delete assm
+
+
+static bool vm_initialized = false;
+
+
+static void InitializeVM() {
+ if (!vm_initialized) {
+ CcTest::InitializeVM();
+ vm_initialized = true;
+ }
+}
+
+
+TEST_(bootstrap) {
+ SET_UP();
+
+ // Instructions generated by a C compiler, disassembled by objdump, and
+ // reformatted to suit our disassembly style.
+ COMPARE(dci(0xa9ba7bfd), "stp fp, lr, [csp, #-96]!");
+ COMPARE(dci(0x910003fd), "mov fp, csp");
+ COMPARE(dci(0x9100e3a0), "add x0, fp, #0x38 (56)");
+ COMPARE(dci(0xb900001f), "str wzr, [x0]");
+ COMPARE(dci(0x528000e1), "movz w1, #0x7");
+ COMPARE(dci(0xb9001c01), "str w1, [x0, #28]");
+ COMPARE(dci(0x390043a0), "strb w0, [fp, #16]");
+ COMPARE(dci(0x790027a0), "strh w0, [fp, #18]");
+ COMPARE(dci(0xb9400400), "ldr w0, [x0, #4]");
+ COMPARE(dci(0x0b000021), "add w1, w1, w0");
+ COMPARE(dci(0x531b6800), "lsl w0, w0, #5");
+ COMPARE(dci(0x521e0400), "eor w0, w0, #0xc");
+ COMPARE(dci(0x72af0f00), "movk w0, #0x7878, lsl #16");
+ COMPARE(dci(0xd360fc00), "lsr x0, x0, #32");
+ COMPARE(dci(0x13037c01), "asr w1, w0, #3");
+ COMPARE(dci(0x4b000021), "sub w1, w1, w0");
+ COMPARE(dci(0x2a0103e0), "mov w0, w1");
+ COMPARE(dci(0x93407c00), "sxtw x0, w0");
+ COMPARE(dci(0x2a000020), "orr w0, w1, w0");
+ COMPARE(dci(0xa8c67bfd), "ldp fp, lr, [csp], #96");
+
+ CLEANUP();
+}
+
+
+TEST_(mov_mvn) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Mov(w0, Operand(0x1234)), "movz w0, #0x1234");
+ COMPARE(Mov(x1, Operand(0x1234)), "movz x1, #0x1234");
+ COMPARE(Mov(w2, Operand(w3)), "mov w2, w3");
+ COMPARE(Mov(x4, Operand(x5)), "mov x4, x5");
+ COMPARE(Mov(w6, Operand(w7, LSL, 5)), "lsl w6, w7, #5");
+ COMPARE(Mov(x8, Operand(x9, ASR, 42)), "asr x8, x9, #42");
+ COMPARE(Mov(w10, Operand(w11, UXTB)), "uxtb w10, w11");
+ COMPARE(Mov(x12, Operand(x13, UXTB, 1)), "ubfiz x12, x13, #1, #8");
+ COMPARE(Mov(w14, Operand(w15, SXTH, 2)), "sbfiz w14, w15, #2, #16");
+ COMPARE(Mov(x16, Operand(x20, SXTW, 3)), "sbfiz x16, x20, #3, #32");
+
+ COMPARE(Mov(x0, csp), "mov x0, csp");
+ COMPARE(Mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(Mov(x0, xzr), "mov x0, xzr");
+ COMPARE(Mov(w0, wzr), "mov w0, wzr");
+ COMPARE(mov(x0, csp), "mov x0, csp");
+ COMPARE(mov(w0, wcsp), "mov w0, wcsp");
+ COMPARE(mov(x0, xzr), "mov x0, xzr");
+ COMPARE(mov(w0, wzr), "mov w0, wzr");
+
+ COMPARE(Mvn(w0, Operand(0x1)), "movn w0, #0x1");
+ COMPARE(Mvn(x1, Operand(0xfff)), "movn x1, #0xfff");
+ COMPARE(Mvn(w2, Operand(w3)), "mvn w2, w3");
+ COMPARE(Mvn(x4, Operand(x5)), "mvn x4, x5");
+ COMPARE(Mvn(w6, Operand(w7, LSL, 12)), "mvn w6, w7, lsl #12");
+ COMPARE(Mvn(x8, Operand(x9, ASR, 63)), "mvn x8, x9, asr #63");
+
+ CLEANUP();
+}
+
+
+TEST_(move_immediate) {
+ SET_UP();
+
+ COMPARE(movz(w0, 0x1234), "movz w0, #0x1234");
+ COMPARE(movz(x1, 0xabcd0000), "movz x1, #0xabcd0000");
+ COMPARE(movz(x2, 0x555500000000), "movz x2, #0x555500000000");
+ COMPARE(movz(x3, 0xaaaa000000000000), "movz x3, #0xaaaa000000000000");
+ COMPARE(movz(x4, 0xabcd, 16), "movz x4, #0xabcd0000");
+ COMPARE(movz(x5, 0x5555, 32), "movz x5, #0x555500000000");
+ COMPARE(movz(x6, 0xaaaa, 48), "movz x6, #0xaaaa000000000000");
+
+ COMPARE(movk(w7, 0x1234), "movk w7, #0x1234");
+ COMPARE(movk(x8, 0xabcd0000), "movk x8, #0xabcd, lsl #16");
+ COMPARE(movk(x9, 0x555500000000), "movk x9, #0x5555, lsl #32");
+ COMPARE(movk(x10, 0xaaaa000000000000), "movk x10, #0xaaaa, lsl #48");
+ COMPARE(movk(w11, 0xabcd, 16), "movk w11, #0xabcd, lsl #16");
+ COMPARE(movk(x12, 0x5555, 32), "movk x12, #0x5555, lsl #32");
+ COMPARE(movk(x13, 0xaaaa, 48), "movk x13, #0xaaaa, lsl #48");
+
+ COMPARE(movn(w14, 0x1234), "movn w14, #0x1234");
+ COMPARE(movn(x15, 0xabcd0000), "movn x15, #0xabcd0000");
+ COMPARE(movn(x16, 0x555500000000), "movn x16, #0x555500000000");
+ COMPARE(movn(x17, 0xaaaa000000000000), "movn x17, #0xaaaa000000000000");
+ COMPARE(movn(w18, 0xabcd, 16), "movn w18, #0xabcd0000");
+ COMPARE(movn(x19, 0x5555, 32), "movn x19, #0x555500000000");
+ COMPARE(movn(x20, 0xaaaa, 48), "movn x20, #0xaaaa000000000000");
+
+ COMPARE(movk(w21, 0), "movk w21, #0x0");
+ COMPARE(movk(x22, 0, 0), "movk x22, #0x0");
+ COMPARE(movk(w23, 0, 16), "movk w23, #0x0, lsl #16");
+ COMPARE(movk(x24, 0, 32), "movk x24, #0x0, lsl #32");
+ COMPARE(movk(x25, 0, 48), "movk x25, #0x0, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST(move_immediate_2) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // Move instructions expected for certain immediates. This is really a macro
+ // assembler test, to ensure it generates immediates efficiently.
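+ // For example, 0x0001ffff cannot be loaded with a single movz, but its
+ // bitwise inverse 0xfffe0000 fits a movn, so Mov(w0, 0x0001ffff) is expected
+ // to disassemble as "movn w0, #0xfffe0000" below.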
+ COMPARE(Mov(w0, 0), "movz w0, #0x0");
+ COMPARE(Mov(w0, 0x0000ffff), "movz w0, #0xffff");
+ COMPARE(Mov(w0, 0x00010000), "movz w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff0000), "movz w0, #0xffff0000");
+ COMPARE(Mov(w0, 0x0001ffff), "movn w0, #0xfffe0000");
+ COMPARE(Mov(w0, 0xffff8000), "movn w0, #0x7fff");
+ COMPARE(Mov(w0, 0xfffffffe), "movn w0, #0x1");
+ COMPARE(Mov(w0, 0xffffffff), "movn w0, #0x0");
+ COMPARE(Mov(w0, 0x00ffff00), "mov w0, #0xffff00");
+ COMPARE(Mov(w0, 0xfffe7fff), "mov w0, #0xfffe7fff");
+ COMPARE(Mov(w0, 0xfffeffff), "movn w0, #0x10000");
+ COMPARE(Mov(w0, 0xffff7fff), "movn w0, #0x8000");
+
+ COMPARE(Mov(x0, 0), "movz x0, #0x0");
+ COMPARE(Mov(x0, 0x0000ffff), "movz x0, #0xffff");
+ COMPARE(Mov(x0, 0x00010000), "movz x0, #0x10000");
+ COMPARE(Mov(x0, 0xffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0x0001ffff), "mov x0, #0x1ffff");
+ COMPARE(Mov(x0, 0xffff8000), "mov x0, #0xffff8000");
+ COMPARE(Mov(x0, 0xfffffffe), "mov x0, #0xfffffffe");
+ COMPARE(Mov(x0, 0xffffffff), "mov x0, #0xffffffff");
+ COMPARE(Mov(x0, 0x00ffff00), "mov x0, #0xffff00");
+ COMPARE(Mov(x0, 0xffff000000000000), "movz x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0x0000ffff00000000), "movz x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x00000000ffff0000), "movz x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffffffffffff0000), "movn x0, #0xffff");
+ COMPARE(Mov(x0, 0xffffffff0000ffff), "movn x0, #0xffff0000");
+ COMPARE(Mov(x0, 0xffff0000ffffffff), "movn x0, #0xffff00000000");
+ COMPARE(Mov(x0, 0x0000ffffffffffff), "movn x0, #0xffff000000000000");
+ COMPARE(Mov(x0, 0xfffe7fffffffffff), "mov x0, #0xfffe7fffffffffff");
+ COMPARE(Mov(x0, 0xfffeffffffffffff), "movn x0, #0x1000000000000");
+ COMPARE(Mov(x0, 0xffff7fffffffffff), "movn x0, #0x800000000000");
+ COMPARE(Mov(x0, 0xfffffffe7fffffff), "mov x0, #0xfffffffe7fffffff");
+ COMPARE(Mov(x0, 0xfffffffeffffffff), "movn x0, #0x100000000");
+ COMPARE(Mov(x0, 0xffffffff7fffffff), "movn x0, #0x80000000");
+ COMPARE(Mov(x0, 0xfffffffffffe7fff), "mov x0, #0xfffffffffffe7fff");
+ COMPARE(Mov(x0, 0xfffffffffffeffff), "movn x0, #0x10000");
+ COMPARE(Mov(x0, 0xffffffffffff7fff), "movn x0, #0x8000");
+ COMPARE(Mov(x0, 0xffffffffffffffff), "movn x0, #0x0");
+
+ COMPARE(Movk(w0, 0x1234, 0), "movk w0, #0x1234");
+ COMPARE(Movk(x1, 0x2345, 0), "movk x1, #0x2345");
+ COMPARE(Movk(w2, 0x3456, 16), "movk w2, #0x3456, lsl #16");
+ COMPARE(Movk(x3, 0x4567, 16), "movk x3, #0x4567, lsl #16");
+ COMPARE(Movk(x4, 0x5678, 32), "movk x4, #0x5678, lsl #32");
+ COMPARE(Movk(x5, 0x6789, 48), "movk x5, #0x6789, lsl #48");
+
+ CLEANUP();
+}
+
+
+TEST_(add_immediate) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(0xff)), "add w0, w1, #0xff (255)");
+ COMPARE(add(x2, x3, Operand(0x3ff)), "add x2, x3, #0x3ff (1023)");
+ COMPARE(add(w4, w5, Operand(0xfff)), "add w4, w5, #0xfff (4095)");
+ COMPARE(add(x6, x7, Operand(0x1000)), "add x6, x7, #0x1000 (4096)");
+ COMPARE(add(w8, w9, Operand(0xff000)), "add w8, w9, #0xff000 (1044480)");
+ COMPARE(add(x10, x11, Operand(0x3ff000)),
+ "add x10, x11, #0x3ff000 (4190208)");
+ COMPARE(add(w12, w13, Operand(0xfff000)),
+ "add w12, w13, #0xfff000 (16773120)");
+ COMPARE(adds(w14, w15, Operand(0xff)), "adds w14, w15, #0xff (255)");
+ COMPARE(adds(x16, x17, Operand(0xaa000)),
+ "adds x16, x17, #0xaa000 (696320)");
+ COMPARE(cmn(w18, Operand(0xff)), "cmn w18, #0xff (255)");
+ COMPARE(cmn(x19, Operand(0xff000)), "cmn x19, #0xff000 (1044480)");
+ COMPARE(add(w0, wcsp, Operand(0)), "mov w0, wcsp");
+ COMPARE(add(csp, x0, Operand(0)), "mov csp, x0");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_immediate) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(0xff)), "sub w0, w1, #0xff (255)");
+ COMPARE(sub(x2, x3, Operand(0x3ff)), "sub x2, x3, #0x3ff (1023)");
+ COMPARE(sub(w4, w5, Operand(0xfff)), "sub w4, w5, #0xfff (4095)");
+ COMPARE(sub(x6, x7, Operand(0x1000)), "sub x6, x7, #0x1000 (4096)");
+ COMPARE(sub(w8, w9, Operand(0xff000)), "sub w8, w9, #0xff000 (1044480)");
+ COMPARE(sub(x10, x11, Operand(0x3ff000)),
+ "sub x10, x11, #0x3ff000 (4190208)");
+ COMPARE(sub(w12, w13, Operand(0xfff000)),
+ "sub w12, w13, #0xfff000 (16773120)");
+ COMPARE(subs(w14, w15, Operand(0xff)), "subs w14, w15, #0xff (255)");
+ COMPARE(subs(x16, x17, Operand(0xaa000)),
+ "subs x16, x17, #0xaa000 (696320)");
+ COMPARE(cmp(w18, Operand(0xff)), "cmp w18, #0xff (255)");
+ COMPARE(cmp(x19, Operand(0xff000)), "cmp x19, #0xff000 (1044480)");
+
+ COMPARE(add(w1, wcsp, Operand(8)), "add w1, wcsp, #0x8 (8)");
+ COMPARE(add(x2, csp, Operand(16)), "add x2, csp, #0x10 (16)");
+ COMPARE(add(wcsp, wcsp, Operand(42)), "add wcsp, wcsp, #0x2a (42)");
+ COMPARE(cmn(csp, Operand(24)), "cmn csp, #0x18 (24)");
+ COMPARE(adds(wzr, wcsp, Operand(9)), "cmn wcsp, #0x9 (9)");
+
+ CLEANUP();
+}
+
+
+TEST_(add_shifted) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2)), "add w0, w1, w2");
+ COMPARE(add(x3, x4, Operand(x5)), "add x3, x4, x5");
+ COMPARE(add(w6, w7, Operand(w8, LSL, 1)), "add w6, w7, w8, lsl #1");
+ COMPARE(add(x9, x10, Operand(x11, LSL, 2)), "add x9, x10, x11, lsl #2");
+ COMPARE(add(w12, w13, Operand(w14, LSR, 3)), "add w12, w13, w14, lsr #3");
+ COMPARE(add(x15, x16, Operand(x17, LSR, 4)), "add x15, x16, x17, lsr #4");
+ COMPARE(add(w18, w19, Operand(w20, ASR, 5)), "add w18, w19, w20, asr #5");
+ COMPARE(add(x21, x22, Operand(x23, ASR, 6)), "add x21, x22, x23, asr #6");
+ COMPARE(cmn(w24, Operand(w25)), "cmn w24, w25");
+ COMPARE(cmn(x26, Operand(cp, LSL, 63)), "cmn x26, cp, lsl #63");
+
+ COMPARE(add(x0, csp, Operand(x1)), "add x0, csp, x1");
+ COMPARE(add(w2, wcsp, Operand(w3)), "add w2, wcsp, w3");
+ COMPARE(add(x4, csp, Operand(x5, LSL, 1)), "add x4, csp, x5, lsl #1");
+ COMPARE(add(x4, xzr, Operand(x5, LSL, 1)), "add x4, xzr, x5, lsl #1");
+ COMPARE(add(w6, wcsp, Operand(w7, LSL, 3)), "add w6, wcsp, w7, lsl #3");
+ COMPARE(adds(xzr, csp, Operand(x8, LSL, 4)), "cmn csp, x8, lsl #4");
+ COMPARE(adds(xzr, xzr, Operand(x8, LSL, 5)), "cmn xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_shifted) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2)), "sub w0, w1, w2");
+ COMPARE(sub(x3, x4, Operand(x5)), "sub x3, x4, x5");
+ COMPARE(sub(w6, w7, Operand(w8, LSL, 1)), "sub w6, w7, w8, lsl #1");
+ COMPARE(sub(x9, x10, Operand(x11, LSL, 2)), "sub x9, x10, x11, lsl #2");
+ COMPARE(sub(w12, w13, Operand(w14, LSR, 3)), "sub w12, w13, w14, lsr #3");
+ COMPARE(sub(x15, x16, Operand(x17, LSR, 4)), "sub x15, x16, x17, lsr #4");
+ COMPARE(sub(w18, w19, Operand(w20, ASR, 5)), "sub w18, w19, w20, asr #5");
+ COMPARE(sub(x21, x22, Operand(x23, ASR, 6)), "sub x21, x22, x23, asr #6");
+ COMPARE(cmp(w24, Operand(w25)), "cmp w24, w25");
+ COMPARE(cmp(x26, Operand(cp, LSL, 63)), "cmp x26, cp, lsl #63");
+ COMPARE(neg(w28, Operand(w29)), "neg w28, w29");
+ COMPARE(neg(lr, Operand(x0, LSR, 62)), "neg lr, x0, lsr #62");
+ COMPARE(negs(w1, Operand(w2)), "negs w1, w2");
+ COMPARE(negs(x3, Operand(x4, ASR, 61)), "negs x3, x4, asr #61");
+
+ COMPARE(sub(x0, csp, Operand(x1)), "sub x0, csp, x1");
+ COMPARE(sub(w2, wcsp, Operand(w3)), "sub w2, wcsp, w3");
+ COMPARE(sub(x4, csp, Operand(x5, LSL, 1)), "sub x4, csp, x5, lsl #1");
+ COMPARE(sub(x4, xzr, Operand(x5, LSL, 1)), "neg x4, x5, lsl #1");
+ COMPARE(sub(w6, wcsp, Operand(w7, LSL, 3)), "sub w6, wcsp, w7, lsl #3");
+ COMPARE(subs(xzr, csp, Operand(x8, LSL, 4)), "cmp csp, x8, lsl #4");
+ COMPARE(subs(xzr, xzr, Operand(x8, LSL, 5)), "cmp xzr, x8, lsl #5");
+
+ CLEANUP();
+}
+
+
+TEST_(add_extended) {
+ SET_UP();
+
+ COMPARE(add(w0, w1, Operand(w2, UXTB)), "add w0, w1, w2, uxtb");
+ COMPARE(adds(x3, x4, Operand(w5, UXTB, 1)), "adds x3, x4, w5, uxtb #1");
+ COMPARE(add(w6, w7, Operand(w8, UXTH, 2)), "add w6, w7, w8, uxth #2");
+ COMPARE(adds(x9, x10, Operand(x11, UXTW, 3)), "adds x9, x10, w11, uxtw #3");
+ COMPARE(add(x12, x13, Operand(x14, UXTX, 4)), "add x12, x13, x14, uxtx #4");
+ COMPARE(adds(w15, w16, Operand(w17, SXTB, 4)), "adds w15, w16, w17, sxtb #4");
+ COMPARE(add(x18, x19, Operand(x20, SXTB, 3)), "add x18, x19, w20, sxtb #3");
+ COMPARE(adds(w21, w22, Operand(w23, SXTH, 2)), "adds w21, w22, w23, sxth #2");
+ COMPARE(add(x24, x25, Operand(x26, SXTW, 1)), "add x24, x25, w26, sxtw #1");
+ COMPARE(adds(cp, jssp, Operand(fp, SXTX)), "adds cp, jssp, fp, sxtx");
+ COMPARE(cmn(w0, Operand(w1, UXTB, 2)), "cmn w0, w1, uxtb #2");
+ COMPARE(cmn(x2, Operand(x3, SXTH, 4)), "cmn x2, w3, sxth #4");
+
+ COMPARE(add(w0, wcsp, Operand(w1, UXTB)), "add w0, wcsp, w1, uxtb");
+ COMPARE(add(x2, csp, Operand(x3, UXTH, 1)), "add x2, csp, w3, uxth #1");
+ COMPARE(add(wcsp, wcsp, Operand(w4, UXTW, 2)), "add wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmn(csp, Operand(xzr, UXTX, 3)), "cmn csp, xzr, lsl #3");
+ COMPARE(cmn(csp, Operand(xzr, LSL, 4)), "cmn csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(sub_extended) {
+ SET_UP();
+
+ COMPARE(sub(w0, w1, Operand(w2, UXTB)), "sub w0, w1, w2, uxtb");
+ COMPARE(subs(x3, x4, Operand(w5, UXTB, 1)), "subs x3, x4, w5, uxtb #1");
+ COMPARE(sub(w6, w7, Operand(w8, UXTH, 2)), "sub w6, w7, w8, uxth #2");
+ COMPARE(subs(x9, x10, Operand(x11, UXTW, 3)), "subs x9, x10, w11, uxtw #3");
+ COMPARE(sub(x12, x13, Operand(x14, UXTX, 4)), "sub x12, x13, x14, uxtx #4");
+ COMPARE(subs(w15, w16, Operand(w17, SXTB, 4)), "subs w15, w16, w17, sxtb #4");
+ COMPARE(sub(x18, x19, Operand(x20, SXTB, 3)), "sub x18, x19, w20, sxtb #3");
+ COMPARE(subs(w21, w22, Operand(w23, SXTH, 2)), "subs w21, w22, w23, sxth #2");
+ COMPARE(sub(x24, x25, Operand(x26, SXTW, 1)), "sub x24, x25, w26, sxtw #1");
+ COMPARE(subs(cp, jssp, Operand(fp, SXTX)), "subs cp, jssp, fp, sxtx");
+ COMPARE(cmp(w0, Operand(w1, SXTB, 1)), "cmp w0, w1, sxtb #1");
+ COMPARE(cmp(x2, Operand(x3, UXTH, 3)), "cmp x2, w3, uxth #3");
+
+ COMPARE(sub(w0, wcsp, Operand(w1, UXTB)), "sub w0, wcsp, w1, uxtb");
+ COMPARE(sub(x2, csp, Operand(x3, UXTH, 1)), "sub x2, csp, w3, uxth #1");
+ COMPARE(sub(wcsp, wcsp, Operand(w4, UXTW, 2)), "sub wcsp, wcsp, w4, lsl #2");
+ COMPARE(cmp(csp, Operand(xzr, UXTX, 3)), "cmp csp, xzr, lsl #3");
+ COMPARE(cmp(csp, Operand(xzr, LSL, 4)), "cmp csp, xzr, lsl #4");
+
+ CLEANUP();
+}
+
+
+TEST_(adc_subc_ngc) {
+ SET_UP();
+
+ COMPARE(adc(w0, w1, Operand(w2)), "adc w0, w1, w2");
+ COMPARE(adc(x3, x4, Operand(x5)), "adc x3, x4, x5");
+ COMPARE(adcs(w6, w7, Operand(w8)), "adcs w6, w7, w8");
+ COMPARE(adcs(x9, x10, Operand(x11)), "adcs x9, x10, x11");
+ COMPARE(sbc(w12, w13, Operand(w14)), "sbc w12, w13, w14");
+ COMPARE(sbc(x15, x16, Operand(x17)), "sbc x15, x16, x17");
+ COMPARE(sbcs(w18, w19, Operand(w20)), "sbcs w18, w19, w20");
+ COMPARE(sbcs(x21, x22, Operand(x23)), "sbcs x21, x22, x23");
+ COMPARE(ngc(w24, Operand(w25)), "ngc w24, w25");
+ COMPARE(ngc(x26, Operand(cp)), "ngc x26, cp");
+ COMPARE(ngcs(w28, Operand(w29)), "ngcs w28, w29");
+ COMPARE(ngcs(lr, Operand(x0)), "ngcs lr, x0");
+
+ CLEANUP();
+}
+
+
+TEST_(mul_and_div) {
+ SET_UP();
+
+ COMPARE(mul(w0, w1, w2), "mul w0, w1, w2");
+ COMPARE(mul(x3, x4, x5), "mul x3, x4, x5");
+ COMPARE(mul(w30, w0, w1), "mul w30, w0, w1");
+ COMPARE(mul(lr, x0, x1), "mul lr, x0, x1");
+ COMPARE(mneg(w0, w1, w2), "mneg w0, w1, w2");
+ COMPARE(mneg(x3, x4, x5), "mneg x3, x4, x5");
+ COMPARE(mneg(w30, w0, w1), "mneg w30, w0, w1");
+ COMPARE(mneg(lr, x0, x1), "mneg lr, x0, x1");
+ COMPARE(smull(x0, w0, w1), "smull x0, w0, w1");
+ COMPARE(smull(lr, w30, w0), "smull lr, w30, w0");
+ COMPARE(smulh(x0, x1, x2), "smulh x0, x1, x2");
+
+ COMPARE(madd(w0, w1, w2, w3), "madd w0, w1, w2, w3");
+ COMPARE(madd(x4, x5, x6, x7), "madd x4, x5, x6, x7");
+ COMPARE(madd(w8, w9, w10, wzr), "mul w8, w9, w10");
+ COMPARE(madd(x11, x12, x13, xzr), "mul x11, x12, x13");
+ COMPARE(msub(w14, w15, w16, w17), "msub w14, w15, w16, w17");
+ COMPARE(msub(x18, x19, x20, x21), "msub x18, x19, x20, x21");
+ COMPARE(msub(w22, w23, w24, wzr), "mneg w22, w23, w24");
+ COMPARE(msub(x25, x26, x0, xzr), "mneg x25, x26, x0");
+
+ COMPARE(sdiv(w0, w1, w2), "sdiv w0, w1, w2");
+ COMPARE(sdiv(x3, x4, x5), "sdiv x3, x4, x5");
+ COMPARE(udiv(w6, w7, w8), "udiv w6, w7, w8");
+ COMPARE(udiv(x9, x10, x11), "udiv x9, x10, x11");
+
+ CLEANUP();
+}
+
+
+TEST(maddl_msubl) {
+ SET_UP();
+
+ COMPARE(smaddl(x0, w1, w2, x3), "smaddl x0, w1, w2, x3");
+ COMPARE(smaddl(x25, w21, w22, x16), "smaddl x25, w21, w22, x16");
+ COMPARE(umaddl(x0, w1, w2, x3), "umaddl x0, w1, w2, x3");
+ COMPARE(umaddl(x25, w21, w22, x16), "umaddl x25, w21, w22, x16");
+
+ COMPARE(smsubl(x0, w1, w2, x3), "smsubl x0, w1, w2, x3");
+ COMPARE(smsubl(x25, w21, w22, x16), "smsubl x25, w21, w22, x16");
+ COMPARE(umsubl(x0, w1, w2, x3), "umsubl x0, w1, w2, x3");
+ COMPARE(umsubl(x25, w21, w22, x16), "umsubl x25, w21, w22, x16");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_1_source) {
+ SET_UP();
+
+ COMPARE(rbit(w0, w1), "rbit w0, w1");
+ COMPARE(rbit(x2, x3), "rbit x2, x3");
+ COMPARE(rev16(w4, w5), "rev16 w4, w5");
+ COMPARE(rev16(x6, x7), "rev16 x6, x7");
+ COMPARE(rev32(x8, x9), "rev32 x8, x9");
+ COMPARE(rev(w10, w11), "rev w10, w11");
+ COMPARE(rev(x12, x13), "rev x12, x13");
+ COMPARE(clz(w14, w15), "clz w14, w15");
+ COMPARE(clz(x16, x17), "clz x16, x17");
+ COMPARE(cls(w18, w19), "cls w18, w19");
+ COMPARE(cls(x20, x21), "cls x20, x21");
+
+ CLEANUP();
+}
+
+
+TEST_(bitfield) {
+ SET_UP();
+
+ COMPARE(sxtb(w0, w1), "sxtb w0, w1");
+ COMPARE(sxtb(x2, x3), "sxtb x2, w3");
+ COMPARE(sxth(w4, w5), "sxth w4, w5");
+ COMPARE(sxth(x6, x7), "sxth x6, w7");
+ COMPARE(sxtw(x8, x9), "sxtw x8, w9");
+ COMPARE(sxtb(x0, w1), "sxtb x0, w1");
+ COMPARE(sxth(x2, w3), "sxth x2, w3");
+ COMPARE(sxtw(x4, w5), "sxtw x4, w5");
+
+ COMPARE(uxtb(w10, w11), "uxtb w10, w11");
+ COMPARE(uxtb(x12, x13), "uxtb x12, w13");
+ COMPARE(uxth(w14, w15), "uxth w14, w15");
+ COMPARE(uxth(x16, x17), "uxth x16, w17");
+ COMPARE(uxtw(x18, x19), "ubfx x18, x19, #0, #32");
+
+ COMPARE(asr(w20, w21, 10), "asr w20, w21, #10");
+ COMPARE(asr(x22, x23, 20), "asr x22, x23, #20");
+ COMPARE(lsr(w24, w25, 10), "lsr w24, w25, #10");
+ COMPARE(lsr(x26, cp, 20), "lsr x26, cp, #20");
+ COMPARE(lsl(w28, w29, 10), "lsl w28, w29, #10");
+ COMPARE(lsl(lr, x0, 20), "lsl lr, x0, #20");
+
+ COMPARE(sbfiz(w1, w2, 1, 20), "sbfiz w1, w2, #1, #20");
+ COMPARE(sbfiz(x3, x4, 2, 19), "sbfiz x3, x4, #2, #19");
+ COMPARE(sbfx(w5, w6, 3, 18), "sbfx w5, w6, #3, #18");
+ COMPARE(sbfx(x7, x8, 4, 17), "sbfx x7, x8, #4, #17");
+ COMPARE(bfi(w9, w10, 5, 16), "bfi w9, w10, #5, #16");
+ COMPARE(bfi(x11, x12, 6, 15), "bfi x11, x12, #6, #15");
+ COMPARE(bfxil(w13, w14, 7, 14), "bfxil w13, w14, #7, #14");
+ COMPARE(bfxil(x15, x16, 8, 13), "bfxil x15, x16, #8, #13");
+ COMPARE(ubfiz(w17, w18, 9, 12), "ubfiz w17, w18, #9, #12");
+ COMPARE(ubfiz(x19, x20, 10, 11), "ubfiz x19, x20, #10, #11");
+ COMPARE(ubfx(w21, w22, 11, 10), "ubfx w21, w22, #11, #10");
+ COMPARE(ubfx(x23, x24, 12, 9), "ubfx x23, x24, #12, #9");
+
+ CLEANUP();
+}
+
+
+TEST_(extract) {
+ SET_UP();
+
+ COMPARE(extr(w0, w1, w2, 0), "extr w0, w1, w2, #0");
+ COMPARE(extr(x3, x4, x5, 1), "extr x3, x4, x5, #1");
+ COMPARE(extr(w6, w7, w8, 31), "extr w6, w7, w8, #31");
+ COMPARE(extr(x9, x10, x11, 63), "extr x9, x10, x11, #63");
+ COMPARE(extr(w12, w13, w13, 10), "ror w12, w13, #10");
+ COMPARE(extr(x14, x15, x15, 42), "ror x14, x15, #42");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_immediate) {
+ SET_UP();
+ #define RESULT_SIZE (256)
+
+ char result[RESULT_SIZE];
+
+ // Test immediate encoding - 64-bit destination.
+ // 64-bit patterns.
+ uint64_t value = 0x7fffffff;
+ for (int i = 0; i < 64; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 32-bit patterns.
+ value = 0x00003fff00003fffL;
+ for (int i = 0; i < 32; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 16-bit patterns.
+ value = 0x001f001f001f001fL;
+ for (int i = 0; i < 16; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 8-bit patterns.
+ value = 0x0e0e0e0e0e0e0e0eL;
+ for (int i = 0; i < 8; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 4-bit patterns.
+ value = 0x6666666666666666L;
+ for (int i = 0; i < 4; i++) {
+ snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
+ COMPARE(and_(x0, x0, Operand(value)), result);
+ value = ((value & 1) << 63) | (value >> 1); // Rotate right 1 bit.
+ }
+
+ // 2-bit patterns.
+ COMPARE(and_(x0, x0, Operand(0x5555555555555555L)),
+ "and x0, x0, #0x5555555555555555");
+ COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaaL)),
+ "and x0, x0, #0xaaaaaaaaaaaaaaaa");
+
+ // Test immediate encoding - 32-bit destination.
+ COMPARE(and_(w0, w0, Operand(0xff8007ff)),
+ "and w0, w0, #0xff8007ff"); // 32-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0xf87ff87f)),
+ "and w0, w0, #0xf87ff87f"); // 16-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x87878787)),
+ "and w0, w0, #0x87878787"); // 8-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x66666666)),
+ "and w0, w0, #0x66666666"); // 4-bit pattern.
+ COMPARE(and_(w0, w0, Operand(0x55555555)),
+ "and w0, w0, #0x55555555"); // 2-bit pattern.
+
+ // Test other instructions.
+ COMPARE(tst(w1, Operand(0x11111111)),
+ "tst w1, #0x11111111");
+ COMPARE(tst(x2, Operand(0x8888888888888888L)),
+ "tst x2, #0x8888888888888888");
+ COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
+ "orr w7, w8, #0xaaaaaaaa");
+ COMPARE(orr(x9, x10, Operand(0x5555555555555555L)),
+ "orr x9, x10, #0x5555555555555555");
+ COMPARE(eor(w15, w16, Operand(0x00000001)),
+ "eor w15, w16, #0x1");
+ COMPARE(eor(x17, x18, Operand(0x0000000000000003L)),
+ "eor x17, x18, #0x3");
+ COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
+ COMPARE(ands(x25, x26, Operand(0x800000000000000fL)),
+ "ands x25, x26, #0x800000000000000f");
+
+ // Test inverse.
+ COMPARE(bic(w3, w4, Operand(0x20202020)),
+ "and w3, w4, #0xdfdfdfdf");
+ COMPARE(bic(x5, x6, Operand(0x4040404040404040L)),
+ "and x5, x6, #0xbfbfbfbfbfbfbfbf");
+ COMPARE(orn(w11, w12, Operand(0x40004000)),
+ "orr w11, w12, #0xbfffbfff");
+ COMPARE(orn(x13, x14, Operand(0x8181818181818181L)),
+ "orr x13, x14, #0x7e7e7e7e7e7e7e7e");
+ COMPARE(eon(w19, w20, Operand(0x80000001)),
+ "eor w19, w20, #0x7ffffffe");
+ COMPARE(eon(x21, x22, Operand(0xc000000000000003L)),
+ "eor x21, x22, #0x3ffffffffffffffc");
+ COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
+ COMPARE(bics(fp, x0, Operand(0xfffffffeffffffffL)),
+ "ands fp, x0, #0x100000000");
+
+ // Test stack pointer.
+ COMPARE(and_(wcsp, wzr, Operand(7)), "and wcsp, wzr, #0x7");
+ COMPARE(ands(xzr, xzr, Operand(7)), "tst xzr, #0x7");
+ COMPARE(orr(csp, xzr, Operand(15)), "orr csp, xzr, #0xf");
+ COMPARE(eor(wcsp, w0, Operand(31)), "eor wcsp, w0, #0x1f");
+
+ // Test move aliases.
+ COMPARE(orr(w0, wzr, Operand(0x00000780)), "orr w0, wzr, #0x780");
+ COMPARE(orr(w1, wzr, Operand(0x00007800)), "orr w1, wzr, #0x7800");
+ COMPARE(orr(w2, wzr, Operand(0x00078000)), "mov w2, #0x78000");
+ COMPARE(orr(w3, wzr, Operand(0x00780000)), "orr w3, wzr, #0x780000");
+ COMPARE(orr(w4, wzr, Operand(0x07800000)), "orr w4, wzr, #0x7800000");
+ COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001UL)),
+ "orr x5, xzr, #0xffffffffffffc001");
+ COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001fUL)),
+ "mov x6, #0xfffffffffffc001f");
+ COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ffUL)),
+ "mov x7, #0xffffffffffc001ff");
+ COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fffUL)),
+ "mov x8, #0xfffffffffc001fff");
+ COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffffUL)),
+ "orr x9, xzr, #0xffffffffc001ffff");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_shifted) {
+ SET_UP();
+
+ COMPARE(and_(w0, w1, Operand(w2)), "and w0, w1, w2");
+ COMPARE(and_(x3, x4, Operand(x5, LSL, 1)), "and x3, x4, x5, lsl #1");
+ COMPARE(and_(w6, w7, Operand(w8, LSR, 2)), "and w6, w7, w8, lsr #2");
+ COMPARE(and_(x9, x10, Operand(x11, ASR, 3)), "and x9, x10, x11, asr #3");
+ COMPARE(and_(w12, w13, Operand(w14, ROR, 4)), "and w12, w13, w14, ror #4");
+
+ COMPARE(bic(w15, w16, Operand(w17)), "bic w15, w16, w17");
+ COMPARE(bic(x18, x19, Operand(x20, LSL, 5)), "bic x18, x19, x20, lsl #5");
+ COMPARE(bic(w21, w22, Operand(w23, LSR, 6)), "bic w21, w22, w23, lsr #6");
+ COMPARE(bic(x24, x25, Operand(x26, ASR, 7)), "bic x24, x25, x26, asr #7");
+ COMPARE(bic(w27, w28, Operand(w29, ROR, 8)), "bic w27, w28, w29, ror #8");
+
+ COMPARE(orr(w0, w1, Operand(w2)), "orr w0, w1, w2");
+ COMPARE(orr(x3, x4, Operand(x5, LSL, 9)), "orr x3, x4, x5, lsl #9");
+ COMPARE(orr(w6, w7, Operand(w8, LSR, 10)), "orr w6, w7, w8, lsr #10");
+ COMPARE(orr(x9, x10, Operand(x11, ASR, 11)), "orr x9, x10, x11, asr #11");
+ COMPARE(orr(w12, w13, Operand(w14, ROR, 12)), "orr w12, w13, w14, ror #12");
+
+ COMPARE(orn(w15, w16, Operand(w17)), "orn w15, w16, w17");
+ COMPARE(orn(x18, x19, Operand(x20, LSL, 13)), "orn x18, x19, x20, lsl #13");
+ COMPARE(orn(w21, w22, Operand(w23, LSR, 14)), "orn w21, w22, w23, lsr #14");
+ COMPARE(orn(x24, x25, Operand(x26, ASR, 15)), "orn x24, x25, x26, asr #15");
+ COMPARE(orn(w27, w28, Operand(w29, ROR, 16)), "orn w27, w28, w29, ror #16");
+
+ COMPARE(eor(w0, w1, Operand(w2)), "eor w0, w1, w2");
+ COMPARE(eor(x3, x4, Operand(x5, LSL, 17)), "eor x3, x4, x5, lsl #17");
+ COMPARE(eor(w6, w7, Operand(w8, LSR, 18)), "eor w6, w7, w8, lsr #18");
+ COMPARE(eor(x9, x10, Operand(x11, ASR, 19)), "eor x9, x10, x11, asr #19");
+ COMPARE(eor(w12, w13, Operand(w14, ROR, 20)), "eor w12, w13, w14, ror #20");
+
+ COMPARE(eon(w15, w16, Operand(w17)), "eon w15, w16, w17");
+ COMPARE(eon(x18, x19, Operand(x20, LSL, 21)), "eon x18, x19, x20, lsl #21");
+ COMPARE(eon(w21, w22, Operand(w23, LSR, 22)), "eon w21, w22, w23, lsr #22");
+ COMPARE(eon(x24, x25, Operand(x26, ASR, 23)), "eon x24, x25, x26, asr #23");
+ COMPARE(eon(w27, w28, Operand(w29, ROR, 24)), "eon w27, w28, w29, ror #24");
+
+ COMPARE(ands(w0, w1, Operand(w2)), "ands w0, w1, w2");
+ COMPARE(ands(x3, x4, Operand(x5, LSL, 1)), "ands x3, x4, x5, lsl #1");
+ COMPARE(ands(w6, w7, Operand(w8, LSR, 2)), "ands w6, w7, w8, lsr #2");
+ COMPARE(ands(x9, x10, Operand(x11, ASR, 3)), "ands x9, x10, x11, asr #3");
+ COMPARE(ands(w12, w13, Operand(w14, ROR, 4)), "ands w12, w13, w14, ror #4");
+
+ COMPARE(bics(w15, w16, Operand(w17)), "bics w15, w16, w17");
+ COMPARE(bics(x18, x19, Operand(x20, LSL, 5)), "bics x18, x19, x20, lsl #5");
+ COMPARE(bics(w21, w22, Operand(w23, LSR, 6)), "bics w21, w22, w23, lsr #6");
+ COMPARE(bics(x24, x25, Operand(x26, ASR, 7)), "bics x24, x25, x26, asr #7");
+ COMPARE(bics(w27, w28, Operand(w29, ROR, 8)), "bics w27, w28, w29, ror #8");
+
+ COMPARE(tst(w0, Operand(w1)), "tst w0, w1");
+ COMPARE(tst(w2, Operand(w3, ROR, 10)), "tst w2, w3, ror #10");
+ COMPARE(tst(x0, Operand(x1)), "tst x0, x1");
+ COMPARE(tst(x2, Operand(x3, ROR, 42)), "tst x2, x3, ror #42");
+
+ COMPARE(orn(w0, wzr, Operand(w1)), "mvn w0, w1");
+ COMPARE(orn(w2, wzr, Operand(w3, ASR, 5)), "mvn w2, w3, asr #5");
+ COMPARE(orn(x0, xzr, Operand(x1)), "mvn x0, x1");
+ COMPARE(orn(x2, xzr, Operand(x3, ASR, 42)), "mvn x2, x3, asr #42");
+
+ COMPARE(orr(w0, wzr, Operand(w1)), "mov w0, w1");
+ COMPARE(orr(x0, xzr, Operand(x1)), "mov x0, x1");
+ COMPARE(orr(w16, wzr, Operand(w17, LSL, 1)), "orr w16, wzr, w17, lsl #1");
+ COMPARE(orr(x16, xzr, Operand(x17, ASR, 2)), "orr x16, xzr, x17, asr #2");
+
+ CLEANUP();
+}
+
+
+TEST_(dp_2_source) {
+ SET_UP();
+
+ COMPARE(lslv(w0, w1, w2), "lsl w0, w1, w2");
+ COMPARE(lslv(x3, x4, x5), "lsl x3, x4, x5");
+ COMPARE(lsrv(w6, w7, w8), "lsr w6, w7, w8");
+ COMPARE(lsrv(x9, x10, x11), "lsr x9, x10, x11");
+ COMPARE(asrv(w12, w13, w14), "asr w12, w13, w14");
+ COMPARE(asrv(x15, x16, x17), "asr x15, x16, x17");
+ COMPARE(rorv(w18, w19, w20), "ror w18, w19, w20");
+ COMPARE(rorv(x21, x22, x23), "ror x21, x22, x23");
+
+ CLEANUP();
+}
+
+
+TEST_(adr) {
+ SET_UP();
+
+ COMPARE_PREFIX(adr(x0, 0), "adr x0, #+0x0");
+ COMPARE_PREFIX(adr(x1, 1), "adr x1, #+0x1");
+ COMPARE_PREFIX(adr(x2, -1), "adr x2, #-0x1");
+ COMPARE_PREFIX(adr(x3, 4), "adr x3, #+0x4");
+ COMPARE_PREFIX(adr(x4, -4), "adr x4, #-0x4");
+ COMPARE_PREFIX(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
+ COMPARE_PREFIX(adr(x6, -0x00100000), "adr x6, #-0x100000");
+ COMPARE_PREFIX(adr(xzr, 0), "adr xzr, #+0x0");
+
+ CLEANUP();
+}
+
+
+TEST_(branch) {
+ SET_UP();
+
+ #define INST_OFF(x) ((x) >> kInstructionSizeLog2)
+ COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
+ COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
+ COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
+ COMPARE_PREFIX(b(INST_OFF(-0x8000000)), "b #-0x8000000");
+ COMPARE_PREFIX(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
+ COMPARE_PREFIX(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
+ COMPARE_PREFIX(bl(INST_OFF(0x4)), "bl #+0x4");
+ COMPARE_PREFIX(bl(INST_OFF(-0x4)), "bl #-0x4");
+ COMPARE_PREFIX(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
+ COMPARE_PREFIX(bl(INST_OFF(-0x100000)), "bl #-0x100000");
+ COMPARE_PREFIX(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
+ COMPARE_PREFIX(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
+ COMPARE_PREFIX(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
+ COMPARE_PREFIX(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
+ COMPARE_PREFIX(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
+ COMPARE_PREFIX(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
+ COMPARE_PREFIX(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
+ COMPARE_PREFIX(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
+ COMPARE_PREFIX(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
+ COMPARE_PREFIX(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
+ COMPARE_PREFIX(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
+ COMPARE_PREFIX(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
+ COMPARE_PREFIX(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
+ COMPARE_PREFIX(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
+ COMPARE(br(x0), "br x0");
+ COMPARE(blr(x1), "blr x1");
+ COMPARE(ret(x2), "ret x2");
+ COMPARE(ret(lr), "ret");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1)), "ldr w0, [x1]");
+ COMPARE(ldr(w2, MemOperand(x3, 4)), "ldr w2, [x3, #4]");
+ COMPARE(ldr(w4, MemOperand(x5, 16380)), "ldr w4, [x5, #16380]");
+ COMPARE(ldr(x6, MemOperand(x7)), "ldr x6, [x7]");
+ COMPARE(ldr(x8, MemOperand(x9, 8)), "ldr x8, [x9, #8]");
+ COMPARE(ldr(x10, MemOperand(x11, 32760)), "ldr x10, [x11, #32760]");
+ COMPARE(str(w12, MemOperand(x13)), "str w12, [x13]");
+ COMPARE(str(w14, MemOperand(x15, 4)), "str w14, [x15, #4]");
+ COMPARE(str(w16, MemOperand(x17, 16380)), "str w16, [x17, #16380]");
+ COMPARE(str(x18, MemOperand(x19)), "str x18, [x19]");
+ COMPARE(str(x20, MemOperand(x21, 8)), "str x20, [x21, #8]");
+ COMPARE(str(x22, MemOperand(x23, 32760)), "str x22, [x23, #32760]");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PreIndex)), "ldr w0, [x1, #4]!");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PreIndex)), "ldr w2, [x3, #255]!");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PreIndex)), "ldr w4, [x5, #-256]!");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PreIndex)), "ldr x6, [x7, #8]!");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PreIndex)), "ldr x8, [x9, #255]!");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PreIndex)), "ldr x10, [x11, #-256]!");
+ COMPARE(str(w12, MemOperand(x13, 4, PreIndex)), "str w12, [x13, #4]!");
+ COMPARE(str(w14, MemOperand(x15, 255, PreIndex)), "str w14, [x15, #255]!");
+ COMPARE(str(w16, MemOperand(x17, -256, PreIndex)), "str w16, [x17, #-256]!");
+ COMPARE(str(x18, MemOperand(x19, 8, PreIndex)), "str x18, [x19, #8]!");
+ COMPARE(str(x20, MemOperand(x21, 255, PreIndex)), "str x20, [x21, #255]!");
+ COMPARE(str(x22, MemOperand(x23, -256, PreIndex)), "str x22, [x23, #-256]!");
+
+ COMPARE(ldr(w0, MemOperand(x1, 4, PostIndex)), "ldr w0, [x1], #4");
+ COMPARE(ldr(w2, MemOperand(x3, 255, PostIndex)), "ldr w2, [x3], #255");
+ COMPARE(ldr(w4, MemOperand(x5, -256, PostIndex)), "ldr w4, [x5], #-256");
+ COMPARE(ldr(x6, MemOperand(x7, 8, PostIndex)), "ldr x6, [x7], #8");
+ COMPARE(ldr(x8, MemOperand(x9, 255, PostIndex)), "ldr x8, [x9], #255");
+ COMPARE(ldr(x10, MemOperand(x11, -256, PostIndex)), "ldr x10, [x11], #-256");
+ COMPARE(str(w12, MemOperand(x13, 4, PostIndex)), "str w12, [x13], #4");
+ COMPARE(str(w14, MemOperand(x15, 255, PostIndex)), "str w14, [x15], #255");
+ COMPARE(str(w16, MemOperand(x17, -256, PostIndex)), "str w16, [x17], #-256");
+ COMPARE(str(x18, MemOperand(x19, 8, PostIndex)), "str x18, [x19], #8");
+ COMPARE(str(x20, MemOperand(x21, 255, PostIndex)), "str x20, [x21], #255");
+ COMPARE(str(x22, MemOperand(x23, -256, PostIndex)), "str x22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(w24, MemOperand(jssp)), "ldr w24, [jssp]");
+ COMPARE(ldr(x25, MemOperand(jssp, 8)), "ldr x25, [jssp, #8]");
+ COMPARE(str(w26, MemOperand(jssp, 4, PreIndex)), "str w26, [jssp, #4]!");
+ COMPARE(str(cp, MemOperand(jssp, -8, PostIndex)), "str cp, [jssp], #-8");
+
+ COMPARE(ldrsw(x0, MemOperand(x1)), "ldrsw x0, [x1]");
+ COMPARE(ldrsw(x2, MemOperand(x3, 8)), "ldrsw x2, [x3, #8]");
+ COMPARE(ldrsw(x4, MemOperand(x5, 42, PreIndex)), "ldrsw x4, [x5, #42]!");
+ COMPARE(ldrsw(x6, MemOperand(x7, -11, PostIndex)), "ldrsw x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_regoffset) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]");
+ COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]");
+ COMPARE(ldr(w6, MemOperand(x7, x8)), "ldr w6, [x7, x8]");
+ COMPARE(ldr(w9, MemOperand(x10, x11, LSL, 2)), "ldr w9, [x10, x11, lsl #2]");
+ COMPARE(ldr(w12, MemOperand(x13, w14, SXTW)), "ldr w12, [x13, w14, sxtw]");
+ COMPARE(ldr(w15, MemOperand(x16, w17, SXTW, 2)),
+ "ldr w15, [x16, w17, sxtw #2]");
+ COMPARE(ldr(w18, MemOperand(x19, x20, SXTX)), "ldr w18, [x19, x20, sxtx]");
+ COMPARE(ldr(w21, MemOperand(x22, x23, SXTX, 2)),
+ "ldr w21, [x22, x23, sxtx #2]");
+ COMPARE(ldr(x0, MemOperand(x1, w2, UXTW)), "ldr x0, [x1, w2, uxtw]");
+ COMPARE(ldr(x3, MemOperand(x4, w5, UXTW, 3)), "ldr x3, [x4, w5, uxtw #3]");
+ COMPARE(ldr(x6, MemOperand(x7, x8)), "ldr x6, [x7, x8]");
+ COMPARE(ldr(x9, MemOperand(x10, x11, LSL, 3)), "ldr x9, [x10, x11, lsl #3]");
+ COMPARE(ldr(x12, MemOperand(x13, w14, SXTW)), "ldr x12, [x13, w14, sxtw]");
+ COMPARE(ldr(x15, MemOperand(x16, w17, SXTW, 3)),
+ "ldr x15, [x16, w17, sxtw #3]");
+ COMPARE(ldr(x18, MemOperand(x19, x20, SXTX)), "ldr x18, [x19, x20, sxtx]");
+ COMPARE(ldr(x21, MemOperand(x22, x23, SXTX, 3)),
+ "ldr x21, [x22, x23, sxtx #3]");
+
+ COMPARE(str(w0, MemOperand(x1, w2, UXTW)), "str w0, [x1, w2, uxtw]");
+ COMPARE(str(w3, MemOperand(x4, w5, UXTW, 2)), "str w3, [x4, w5, uxtw #2]");
+ COMPARE(str(w6, MemOperand(x7, x8)), "str w6, [x7, x8]");
+ COMPARE(str(w9, MemOperand(x10, x11, LSL, 2)), "str w9, [x10, x11, lsl #2]");
+ COMPARE(str(w12, MemOperand(x13, w14, SXTW)), "str w12, [x13, w14, sxtw]");
+ COMPARE(str(w15, MemOperand(x16, w17, SXTW, 2)),
+ "str w15, [x16, w17, sxtw #2]");
+ COMPARE(str(w18, MemOperand(x19, x20, SXTX)), "str w18, [x19, x20, sxtx]");
+ COMPARE(str(w21, MemOperand(x22, x23, SXTX, 2)),
+ "str w21, [x22, x23, sxtx #2]");
+ COMPARE(str(x0, MemOperand(x1, w2, UXTW)), "str x0, [x1, w2, uxtw]");
+ COMPARE(str(x3, MemOperand(x4, w5, UXTW, 3)), "str x3, [x4, w5, uxtw #3]");
+ COMPARE(str(x6, MemOperand(x7, x8)), "str x6, [x7, x8]");
+ COMPARE(str(x9, MemOperand(x10, x11, LSL, 3)), "str x9, [x10, x11, lsl #3]");
+ COMPARE(str(x12, MemOperand(x13, w14, SXTW)), "str x12, [x13, w14, sxtw]");
+ COMPARE(str(x15, MemOperand(x16, w17, SXTW, 3)),
+ "str x15, [x16, w17, sxtw #3]");
+ COMPARE(str(x18, MemOperand(x19, x20, SXTX)), "str x18, [x19, x20, sxtx]");
+ COMPARE(str(x21, MemOperand(x22, x23, SXTX, 3)),
+ "str x21, [x22, x23, sxtx #3]");
+
+ COMPARE(ldrb(w0, MemOperand(x1, w2, UXTW)), "ldrb w0, [x1, w2, uxtw]");
+ COMPARE(ldrb(w6, MemOperand(x7, x8)), "ldrb w6, [x7, x8]");
+ COMPARE(ldrb(w12, MemOperand(x13, w14, SXTW)), "ldrb w12, [x13, w14, sxtw]");
+ COMPARE(ldrb(w18, MemOperand(x19, x20, SXTX)), "ldrb w18, [x19, x20, sxtx]");
+ COMPARE(strb(w0, MemOperand(x1, w2, UXTW)), "strb w0, [x1, w2, uxtw]");
+ COMPARE(strb(w6, MemOperand(x7, x8)), "strb w6, [x7, x8]");
+ COMPARE(strb(w12, MemOperand(x13, w14, SXTW)), "strb w12, [x13, w14, sxtw]");
+ COMPARE(strb(w18, MemOperand(x19, x20, SXTX)), "strb w18, [x19, x20, sxtx]");
+
+ COMPARE(ldrh(w0, MemOperand(x1, w2, UXTW)), "ldrh w0, [x1, w2, uxtw]");
+ COMPARE(ldrh(w3, MemOperand(x4, w5, UXTW, 1)), "ldrh w3, [x4, w5, uxtw #1]");
+ COMPARE(ldrh(w6, MemOperand(x7, x8)), "ldrh w6, [x7, x8]");
+ COMPARE(ldrh(w9, MemOperand(x10, x11, LSL, 1)),
+ "ldrh w9, [x10, x11, lsl #1]");
+ COMPARE(ldrh(w12, MemOperand(x13, w14, SXTW)), "ldrh w12, [x13, w14, sxtw]");
+ COMPARE(ldrh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "ldrh w15, [x16, w17, sxtw #1]");
+ COMPARE(ldrh(w18, MemOperand(x19, x20, SXTX)), "ldrh w18, [x19, x20, sxtx]");
+ COMPARE(ldrh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "ldrh w21, [x22, x23, sxtx #1]");
+ COMPARE(strh(w0, MemOperand(x1, w2, UXTW)), "strh w0, [x1, w2, uxtw]");
+ COMPARE(strh(w3, MemOperand(x4, w5, UXTW, 1)), "strh w3, [x4, w5, uxtw #1]");
+ COMPARE(strh(w6, MemOperand(x7, x8)), "strh w6, [x7, x8]");
+ COMPARE(strh(w9, MemOperand(x10, x11, LSL, 1)),
+ "strh w9, [x10, x11, lsl #1]");
+ COMPARE(strh(w12, MemOperand(x13, w14, SXTW)), "strh w12, [x13, w14, sxtw]");
+ COMPARE(strh(w15, MemOperand(x16, w17, SXTW, 1)),
+ "strh w15, [x16, w17, sxtw #1]");
+ COMPARE(strh(w18, MemOperand(x19, x20, SXTX)), "strh w18, [x19, x20, sxtx]");
+ COMPARE(strh(w21, MemOperand(x22, x23, SXTX, 1)),
+ "strh w21, [x22, x23, sxtx #1]");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(x0, MemOperand(jssp, wzr, SXTW)), "ldr x0, [jssp, wzr, sxtw]");
+ COMPARE(str(x1, MemOperand(jssp, xzr)), "str x1, [jssp, xzr]");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_byte) {
+ SET_UP();
+
+ COMPARE(ldrb(w0, MemOperand(x1)), "ldrb w0, [x1]");
+ COMPARE(ldrb(x2, MemOperand(x3)), "ldrb w2, [x3]");
+ COMPARE(ldrb(w4, MemOperand(x5, 4095)), "ldrb w4, [x5, #4095]");
+ COMPARE(ldrb(w6, MemOperand(x7, 255, PreIndex)), "ldrb w6, [x7, #255]!");
+ COMPARE(ldrb(w8, MemOperand(x9, -256, PreIndex)), "ldrb w8, [x9, #-256]!");
+ COMPARE(ldrb(w10, MemOperand(x11, 255, PostIndex)), "ldrb w10, [x11], #255");
+ COMPARE(ldrb(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrb w12, [x13], #-256");
+ COMPARE(strb(w14, MemOperand(x15)), "strb w14, [x15]");
+ COMPARE(strb(x16, MemOperand(x17)), "strb w16, [x17]");
+ COMPARE(strb(w18, MemOperand(x19, 4095)), "strb w18, [x19, #4095]");
+ COMPARE(strb(w20, MemOperand(x21, 255, PreIndex)), "strb w20, [x21, #255]!");
+ COMPARE(strb(w22, MemOperand(x23, -256, PreIndex)),
+ "strb w22, [x23, #-256]!");
+ COMPARE(strb(w24, MemOperand(x25, 255, PostIndex)), "strb w24, [x25], #255");
+ COMPARE(strb(w26, MemOperand(cp, -256, PostIndex)),
+ "strb w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrb(w28, MemOperand(jssp, 3, PostIndex)), "ldrb w28, [jssp], #3");
+ COMPARE(strb(fp, MemOperand(jssp, -42, PreIndex)), "strb w29, [jssp, #-42]!");
+ COMPARE(ldrsb(w0, MemOperand(x1)), "ldrsb w0, [x1]");
+ COMPARE(ldrsb(x2, MemOperand(x3, 8)), "ldrsb x2, [x3, #8]");
+ COMPARE(ldrsb(w4, MemOperand(x5, 42, PreIndex)), "ldrsb w4, [x5, #42]!");
+ COMPARE(ldrsb(x6, MemOperand(x7, -11, PostIndex)), "ldrsb x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_half) {
+ SET_UP();
+
+ COMPARE(ldrh(w0, MemOperand(x1)), "ldrh w0, [x1]");
+ COMPARE(ldrh(x2, MemOperand(x3)), "ldrh w2, [x3]");
+ COMPARE(ldrh(w4, MemOperand(x5, 8190)), "ldrh w4, [x5, #8190]");
+ COMPARE(ldrh(w6, MemOperand(x7, 255, PreIndex)), "ldrh w6, [x7, #255]!");
+ COMPARE(ldrh(w8, MemOperand(x9, -256, PreIndex)), "ldrh w8, [x9, #-256]!");
+ COMPARE(ldrh(w10, MemOperand(x11, 255, PostIndex)), "ldrh w10, [x11], #255");
+ COMPARE(ldrh(w12, MemOperand(x13, -256, PostIndex)),
+ "ldrh w12, [x13], #-256");
+ COMPARE(strh(w14, MemOperand(x15)), "strh w14, [x15]");
+ COMPARE(strh(x16, MemOperand(x17)), "strh w16, [x17]");
+ COMPARE(strh(w18, MemOperand(x19, 8190)), "strh w18, [x19, #8190]");
+ COMPARE(strh(w20, MemOperand(x21, 255, PreIndex)), "strh w20, [x21, #255]!");
+ COMPARE(strh(w22, MemOperand(x23, -256, PreIndex)),
+ "strh w22, [x23, #-256]!");
+ COMPARE(strh(w24, MemOperand(x25, 255, PostIndex)), "strh w24, [x25], #255");
+ COMPARE(strh(w26, MemOperand(cp, -256, PostIndex)),
+ "strh w26, [cp], #-256");
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldrh(w28, MemOperand(jssp, 3, PostIndex)), "ldrh w28, [jssp], #3");
+ COMPARE(strh(fp, MemOperand(jssp, -42, PreIndex)), "strh w29, [jssp, #-42]!");
+ COMPARE(ldrh(w30, MemOperand(x0, 255)), "ldurh w30, [x0, #255]");
+ COMPARE(ldrh(x1, MemOperand(x2, -256)), "ldurh w1, [x2, #-256]");
+ COMPARE(strh(w3, MemOperand(x4, 255)), "sturh w3, [x4, #255]");
+ COMPARE(strh(x5, MemOperand(x6, -256)), "sturh w5, [x6, #-256]");
+ COMPARE(ldrsh(w0, MemOperand(x1)), "ldrsh w0, [x1]");
+ COMPARE(ldrsh(w2, MemOperand(x3, 8)), "ldrsh w2, [x3, #8]");
+ COMPARE(ldrsh(w4, MemOperand(x5, 42, PreIndex)), "ldrsh w4, [x5, #42]!");
+ COMPARE(ldrsh(x6, MemOperand(x7, -11, PostIndex)), "ldrsh x6, [x7], #-11");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_fp) {
+ SET_UP();
+
+ COMPARE(ldr(s0, MemOperand(x1)), "ldr s0, [x1]");
+ COMPARE(ldr(s2, MemOperand(x3, 4)), "ldr s2, [x3, #4]");
+ COMPARE(ldr(s4, MemOperand(x5, 16380)), "ldr s4, [x5, #16380]");
+ COMPARE(ldr(d6, MemOperand(x7)), "ldr d6, [x7]");
+ COMPARE(ldr(d8, MemOperand(x9, 8)), "ldr d8, [x9, #8]");
+ COMPARE(ldr(d10, MemOperand(x11, 32760)), "ldr d10, [x11, #32760]");
+ COMPARE(str(s12, MemOperand(x13)), "str s12, [x13]");
+ COMPARE(str(s14, MemOperand(x15, 4)), "str s14, [x15, #4]");
+ COMPARE(str(s16, MemOperand(x17, 16380)), "str s16, [x17, #16380]");
+ COMPARE(str(d18, MemOperand(x19)), "str d18, [x19]");
+ COMPARE(str(d20, MemOperand(x21, 8)), "str d20, [x21, #8]");
+ COMPARE(str(d22, MemOperand(x23, 32760)), "str d22, [x23, #32760]");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PreIndex)), "ldr s0, [x1, #4]!");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PreIndex)), "ldr s2, [x3, #255]!");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PreIndex)), "ldr s4, [x5, #-256]!");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PreIndex)), "ldr d6, [x7, #8]!");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PreIndex)), "ldr d8, [x9, #255]!");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PreIndex)), "ldr d10, [x11, #-256]!");
+ COMPARE(str(s12, MemOperand(x13, 4, PreIndex)), "str s12, [x13, #4]!");
+ COMPARE(str(s14, MemOperand(x15, 255, PreIndex)), "str s14, [x15, #255]!");
+ COMPARE(str(s16, MemOperand(x17, -256, PreIndex)), "str s16, [x17, #-256]!");
+ COMPARE(str(d18, MemOperand(x19, 8, PreIndex)), "str d18, [x19, #8]!");
+ COMPARE(str(d20, MemOperand(x21, 255, PreIndex)), "str d20, [x21, #255]!");
+ COMPARE(str(d22, MemOperand(x23, -256, PreIndex)), "str d22, [x23, #-256]!");
+
+ COMPARE(ldr(s0, MemOperand(x1, 4, PostIndex)), "ldr s0, [x1], #4");
+ COMPARE(ldr(s2, MemOperand(x3, 255, PostIndex)), "ldr s2, [x3], #255");
+ COMPARE(ldr(s4, MemOperand(x5, -256, PostIndex)), "ldr s4, [x5], #-256");
+ COMPARE(ldr(d6, MemOperand(x7, 8, PostIndex)), "ldr d6, [x7], #8");
+ COMPARE(ldr(d8, MemOperand(x9, 255, PostIndex)), "ldr d8, [x9], #255");
+ COMPARE(ldr(d10, MemOperand(x11, -256, PostIndex)), "ldr d10, [x11], #-256");
+ COMPARE(str(s12, MemOperand(x13, 4, PostIndex)), "str s12, [x13], #4");
+ COMPARE(str(s14, MemOperand(x15, 255, PostIndex)), "str s14, [x15], #255");
+ COMPARE(str(s16, MemOperand(x17, -256, PostIndex)), "str s16, [x17], #-256");
+ COMPARE(str(d18, MemOperand(x19, 8, PostIndex)), "str d18, [x19], #8");
+ COMPARE(str(d20, MemOperand(x21, 255, PostIndex)), "str d20, [x21], #255");
+ COMPARE(str(d22, MemOperand(x23, -256, PostIndex)), "str d22, [x23], #-256");
+
+ // TODO(all): Fix this for jssp.
+ COMPARE(ldr(s24, MemOperand(jssp)), "ldr s24, [jssp]");
+ COMPARE(ldr(d25, MemOperand(jssp, 8)), "ldr d25, [jssp, #8]");
+ COMPARE(str(s26, MemOperand(jssp, 4, PreIndex)), "str s26, [jssp, #4]!");
+ COMPARE(str(d27, MemOperand(jssp, -8, PostIndex)), "str d27, [jssp], #-8");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_unscaled) {
+ SET_UP();
+
+ COMPARE(ldr(w0, MemOperand(x1, 1)), "ldur w0, [x1, #1]");
+ COMPARE(ldr(w2, MemOperand(x3, -1)), "ldur w2, [x3, #-1]");
+ COMPARE(ldr(w4, MemOperand(x5, 255)), "ldur w4, [x5, #255]");
+ COMPARE(ldr(w6, MemOperand(x7, -256)), "ldur w6, [x7, #-256]");
+ COMPARE(ldr(x8, MemOperand(x9, 1)), "ldur x8, [x9, #1]");
+ COMPARE(ldr(x10, MemOperand(x11, -1)), "ldur x10, [x11, #-1]");
+ COMPARE(ldr(x12, MemOperand(x13, 255)), "ldur x12, [x13, #255]");
+ COMPARE(ldr(x14, MemOperand(x15, -256)), "ldur x14, [x15, #-256]");
+ COMPARE(str(w16, MemOperand(x17, 1)), "stur w16, [x17, #1]");
+ COMPARE(str(w18, MemOperand(x19, -1)), "stur w18, [x19, #-1]");
+ COMPARE(str(w20, MemOperand(x21, 255)), "stur w20, [x21, #255]");
+ COMPARE(str(w22, MemOperand(x23, -256)), "stur w22, [x23, #-256]");
+ COMPARE(str(x24, MemOperand(x25, 1)), "stur x24, [x25, #1]");
+ COMPARE(str(x26, MemOperand(cp, -1)), "stur x26, [cp, #-1]");
+ COMPARE(str(jssp, MemOperand(fp, 255)), "stur jssp, [fp, #255]");
+ COMPARE(str(lr, MemOperand(x0, -256)), "stur lr, [x0, #-256]");
+ COMPARE(ldr(w0, MemOperand(csp, 1)), "ldur w0, [csp, #1]");
+ COMPARE(str(x1, MemOperand(csp, -1)), "stur x1, [csp, #-1]");
+ COMPARE(ldrb(w2, MemOperand(x3, -2)), "ldurb w2, [x3, #-2]");
+ COMPARE(ldrsb(w4, MemOperand(x5, -3)), "ldursb w4, [x5, #-3]");
+ COMPARE(ldrsb(x6, MemOperand(x7, -4)), "ldursb x6, [x7, #-4]");
+ COMPARE(ldrh(w8, MemOperand(x9, -5)), "ldurh w8, [x9, #-5]");
+ COMPARE(ldrsh(w10, MemOperand(x11, -6)), "ldursh w10, [x11, #-6]");
+ COMPARE(ldrsh(x12, MemOperand(x13, -7)), "ldursh x12, [x13, #-7]");
+ COMPARE(ldrsw(x14, MemOperand(x15, -8)), "ldursw x14, [x15, #-8]");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair) {
+ SET_UP();
+
+ COMPARE(ldp(w0, w1, MemOperand(x2)), "ldp w0, w1, [x2]");
+ COMPARE(ldp(x3, x4, MemOperand(x5)), "ldp x3, x4, [x5]");
+ COMPARE(ldp(w6, w7, MemOperand(x8, 4)), "ldp w6, w7, [x8, #4]");
+ COMPARE(ldp(x9, x10, MemOperand(x11, 8)), "ldp x9, x10, [x11, #8]");
+ COMPARE(ldp(w12, w13, MemOperand(x14, 252)), "ldp w12, w13, [x14, #252]");
+ COMPARE(ldp(x15, x16, MemOperand(x17, 504)), "ldp x15, x16, [x17, #504]");
+ COMPARE(ldp(w18, w19, MemOperand(x20, -256)), "ldp w18, w19, [x20, #-256]");
+ COMPARE(ldp(x21, x22, MemOperand(x23, -512)), "ldp x21, x22, [x23, #-512]");
+ COMPARE(ldp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "ldp w24, w25, [x26, #252]!");
+ COMPARE(ldp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "ldp cp, jssp, [fp, #504]!");
+ COMPARE(ldp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "ldp w30, w0, [x1, #-256]!");
+ COMPARE(ldp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "ldp x2, x3, [x4, #-512]!");
+ COMPARE(ldp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "ldp w5, w6, [x7], #252");
+ COMPARE(ldp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "ldp x8, x9, [x10], #504");
+ COMPARE(ldp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "ldp w11, w12, [x13], #-256");
+ COMPARE(ldp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "ldp x14, x15, [x16], #-512");
+
+ COMPARE(ldp(s17, s18, MemOperand(x19)), "ldp s17, s18, [x19]");
+ COMPARE(ldp(s20, s21, MemOperand(x22, 252)), "ldp s20, s21, [x22, #252]");
+ COMPARE(ldp(s23, s24, MemOperand(x25, -256)), "ldp s23, s24, [x25, #-256]");
+ COMPARE(ldp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "ldp s26, s27, [jssp, #252]!");
+ COMPARE(ldp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "ldp s29, s30, [fp, #-256]!");
+ COMPARE(ldp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "ldp s31, s0, [x1], #252");
+ COMPARE(ldp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "ldp s2, s3, [x4], #-256");
+ COMPARE(ldp(d17, d18, MemOperand(x19)), "ldp d17, d18, [x19]");
+ COMPARE(ldp(d20, d21, MemOperand(x22, 504)), "ldp d20, d21, [x22, #504]");
+ COMPARE(ldp(d23, d24, MemOperand(x25, -512)), "ldp d23, d24, [x25, #-512]");
+ COMPARE(ldp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "ldp d26, d27, [jssp, #504]!");
+ COMPARE(ldp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "ldp d29, d30, [fp, #-512]!");
+ COMPARE(ldp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "ldp d31, d0, [x1], #504");
+ COMPARE(ldp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "ldp d2, d3, [x4], #-512");
+
+ COMPARE(stp(w0, w1, MemOperand(x2)), "stp w0, w1, [x2]");
+ COMPARE(stp(x3, x4, MemOperand(x5)), "stp x3, x4, [x5]");
+ COMPARE(stp(w6, w7, MemOperand(x8, 4)), "stp w6, w7, [x8, #4]");
+ COMPARE(stp(x9, x10, MemOperand(x11, 8)), "stp x9, x10, [x11, #8]");
+ COMPARE(stp(w12, w13, MemOperand(x14, 252)), "stp w12, w13, [x14, #252]");
+ COMPARE(stp(x15, x16, MemOperand(x17, 504)), "stp x15, x16, [x17, #504]");
+ COMPARE(stp(w18, w19, MemOperand(x20, -256)), "stp w18, w19, [x20, #-256]");
+ COMPARE(stp(x21, x22, MemOperand(x23, -512)), "stp x21, x22, [x23, #-512]");
+ COMPARE(stp(w24, w25, MemOperand(x26, 252, PreIndex)),
+ "stp w24, w25, [x26, #252]!");
+ COMPARE(stp(cp, jssp, MemOperand(fp, 504, PreIndex)),
+ "stp cp, jssp, [fp, #504]!");
+ COMPARE(stp(w30, w0, MemOperand(x1, -256, PreIndex)),
+ "stp w30, w0, [x1, #-256]!");
+ COMPARE(stp(x2, x3, MemOperand(x4, -512, PreIndex)),
+ "stp x2, x3, [x4, #-512]!");
+ COMPARE(stp(w5, w6, MemOperand(x7, 252, PostIndex)),
+ "stp w5, w6, [x7], #252");
+ COMPARE(stp(x8, x9, MemOperand(x10, 504, PostIndex)),
+ "stp x8, x9, [x10], #504");
+ COMPARE(stp(w11, w12, MemOperand(x13, -256, PostIndex)),
+ "stp w11, w12, [x13], #-256");
+ COMPARE(stp(x14, x15, MemOperand(x16, -512, PostIndex)),
+ "stp x14, x15, [x16], #-512");
+
+ COMPARE(stp(s17, s18, MemOperand(x19)), "stp s17, s18, [x19]");
+ COMPARE(stp(s20, s21, MemOperand(x22, 252)), "stp s20, s21, [x22, #252]");
+ COMPARE(stp(s23, s24, MemOperand(x25, -256)), "stp s23, s24, [x25, #-256]");
+ COMPARE(stp(s26, s27, MemOperand(jssp, 252, PreIndex)),
+ "stp s26, s27, [jssp, #252]!");
+ COMPARE(stp(s29, s30, MemOperand(fp, -256, PreIndex)),
+ "stp s29, s30, [fp, #-256]!");
+ COMPARE(stp(s31, s0, MemOperand(x1, 252, PostIndex)),
+ "stp s31, s0, [x1], #252");
+ COMPARE(stp(s2, s3, MemOperand(x4, -256, PostIndex)),
+ "stp s2, s3, [x4], #-256");
+ COMPARE(stp(d17, d18, MemOperand(x19)), "stp d17, d18, [x19]");
+ COMPARE(stp(d20, d21, MemOperand(x22, 504)), "stp d20, d21, [x22, #504]");
+ COMPARE(stp(d23, d24, MemOperand(x25, -512)), "stp d23, d24, [x25, #-512]");
+ COMPARE(stp(d26, d27, MemOperand(jssp, 504, PreIndex)),
+ "stp d26, d27, [jssp, #504]!");
+ COMPARE(stp(d29, d30, MemOperand(fp, -512, PreIndex)),
+ "stp d29, d30, [fp, #-512]!");
+ COMPARE(stp(d31, d0, MemOperand(x1, 504, PostIndex)),
+ "stp d31, d0, [x1], #504");
+ COMPARE(stp(d2, d3, MemOperand(x4, -512, PostIndex)),
+ "stp d2, d3, [x4], #-512");
+
+ // TODO(all): Update / Restore this test.
+ COMPARE(ldp(w16, w17, MemOperand(jssp, 4, PostIndex)),
+ "ldp w16, w17, [jssp], #4");
+ COMPARE(stp(x18, x19, MemOperand(jssp, -8, PreIndex)),
+ "stp x18, x19, [jssp, #-8]!");
+ COMPARE(ldp(s30, s31, MemOperand(jssp, 12, PostIndex)),
+ "ldp s30, s31, [jssp], #12");
+ COMPARE(stp(d30, d31, MemOperand(jssp, -16)),
+ "stp d30, d31, [jssp, #-16]");
+
+ COMPARE(ldpsw(x0, x1, MemOperand(x2)), "ldpsw x0, x1, [x2]");
+ COMPARE(ldpsw(x3, x4, MemOperand(x5, 16)), "ldpsw x3, x4, [x5, #16]");
+ COMPARE(ldpsw(x6, x7, MemOperand(x8, -32, PreIndex)),
+ "ldpsw x6, x7, [x8, #-32]!");
+ COMPARE(ldpsw(x9, x10, MemOperand(x11, 128, PostIndex)),
+ "ldpsw x9, x10, [x11], #128");
+
+ CLEANUP();
+}
+
+
+TEST_(load_store_pair_nontemp) {
+ SET_UP();
+
+ COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
+ COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
+ COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
+ COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
+ COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
+ COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
+ COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
+ COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
+ COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
+ COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
+ COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
+ COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");
+
+ CLEANUP();
+}
+
+#if 0 // TODO(all): enable.
+TEST_(load_literal) {
+ SET_UP();
+
+ COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
+ COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
+ COMPARE_PREFIX(ldr(d11, 1.234), "ldr d11, pc+8");
+ COMPARE_PREFIX(ldr(s22, 2.5f), "ldr s22, pc+8");
+
+ CLEANUP();
+}
+#endif
+
+TEST_(cond_select) {
+ SET_UP();
+
+ COMPARE(csel(w0, w1, w2, eq), "csel w0, w1, w2, eq");
+ COMPARE(csel(x3, x4, x5, ne), "csel x3, x4, x5, ne");
+ COMPARE(csinc(w6, w7, w8, hs), "csinc w6, w7, w8, hs");
+ COMPARE(csinc(x9, x10, x11, lo), "csinc x9, x10, x11, lo");
+ COMPARE(csinv(w12, w13, w14, mi), "csinv w12, w13, w14, mi");
+ COMPARE(csinv(x15, x16, x17, pl), "csinv x15, x16, x17, pl");
+ COMPARE(csneg(w18, w19, w20, vs), "csneg w18, w19, w20, vs");
+ COMPARE(csneg(x21, x22, x23, vc), "csneg x21, x22, x23, vc");
+ COMPARE(cset(w24, hi), "cset w24, hi");
+ COMPARE(cset(x25, ls), "cset x25, ls");
+ COMPARE(csetm(w26, ge), "csetm w26, ge");
+ COMPARE(csetm(cp, lt), "csetm cp, lt");
+ COMPARE(cinc(w28, w29, gt), "cinc w28, w29, gt");
+ COMPARE(cinc(lr, x0, le), "cinc lr, x0, le");
+ COMPARE(cinv(w1, w2, eq), "cinv w1, w2, eq");
+ COMPARE(cinv(x3, x4, ne), "cinv x3, x4, ne");
+ COMPARE(cneg(w5, w6, hs), "cneg w5, w6, hs");
+ COMPARE(cneg(x7, x8, lo), "cneg x7, x8, lo");
+
+ COMPARE(csel(x0, x1, x2, al), "csel x0, x1, x2, al");
+ COMPARE(csel(x1, x2, x3, nv), "csel x1, x2, x3, nv");
+ COMPARE(csinc(x2, x3, x4, al), "csinc x2, x3, x4, al");
+ COMPARE(csinc(x3, x4, x5, nv), "csinc x3, x4, x5, nv");
+ COMPARE(csinv(x4, x5, x6, al), "csinv x4, x5, x6, al");
+ COMPARE(csinv(x5, x6, x7, nv), "csinv x5, x6, x7, nv");
+ COMPARE(csneg(x6, x7, x8, al), "csneg x6, x7, x8, al");
+ COMPARE(csneg(x7, x8, x9, nv), "csneg x7, x8, x9, nv");
+
+ CLEANUP();
+}
+
+
+TEST(cond_select_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
+ COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
+ COMPARE(Csel(w4, w5, 1, hs), "csinc w4, w5, wzr, hs");
+ COMPARE(Csel(x6, x7, -1, lo), "csinv x6, x7, xzr, lo");
+ COMPARE(Csel(x8, x9, 0, mi), "csel x8, x9, xzr, mi");
+ COMPARE(Csel(x10, x11, 1, pl), "csinc x10, x11, xzr, pl");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp) {
+ SET_UP();
+
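+ // In the expected strings, an upper-case letter in the nzcv field marks a
+ // flag bit that is set in the immediate (e.g. #NZCv means N, Z and C set,
+ // V clear).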
+ COMPARE(ccmn(w0, w1, NZCVFlag, eq), "ccmn w0, w1, #NZCV, eq");
+ COMPARE(ccmn(x2, x3, NZCFlag, ne), "ccmn x2, x3, #NZCv, ne");
+ COMPARE(ccmp(w4, w5, NZVFlag, hs), "ccmp w4, w5, #NZcV, hs");
+ COMPARE(ccmp(x6, x7, NZFlag, lo), "ccmp x6, x7, #NZcv, lo");
+ COMPARE(ccmn(w8, 31, NFlag, mi), "ccmn w8, #31, #Nzcv, mi");
+ COMPARE(ccmn(x9, 30, NCFlag, pl), "ccmn x9, #30, #NzCv, pl");
+ COMPARE(ccmp(w10, 29, NVFlag, vs), "ccmp w10, #29, #NzcV, vs");
+ COMPARE(ccmp(x11, 28, NFlag, vc), "ccmp x11, #28, #Nzcv, vc");
+ COMPARE(ccmn(w12, w13, NoFlag, al), "ccmn w12, w13, #nzcv, al");
+ COMPARE(ccmp(x14, 27, ZVFlag, nv), "ccmp x14, #27, #nZcV, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(cond_cmp_macro) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
+ COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
+ COMPARE(Ccmn(w2, -1, CVFlag, gt), "ccmp w2, #1, #nzCV, gt");
+ COMPARE(Ccmn(x3, -31, ZCVFlag, ls), "ccmp x3, #31, #nZCV, ls");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_imm) {
+ SET_UP();
+
+ COMPARE(fmov(s0, 1.0f), "fmov s0, #0x70 (1.0000)");
+ COMPARE(fmov(s31, -13.0f), "fmov s31, #0xaa (-13.0000)");
+ COMPARE(fmov(d1, 1.0), "fmov d1, #0x70 (1.0000)");
+ COMPARE(fmov(d29, -13.0), "fmov d29, #0xaa (-13.0000)");
+
+ CLEANUP();
+}
+
+
+TEST_(fmov_reg) {
+ SET_UP();
+
+ COMPARE(fmov(w3, s13), "fmov w3, s13");
+ COMPARE(fmov(x6, d26), "fmov x6, d26");
+ COMPARE(fmov(s11, w30), "fmov s11, w30");
+ COMPARE(fmov(d31, x2), "fmov d31, x2");
+ COMPARE(fmov(s12, s13), "fmov s12, s13");
+ COMPARE(fmov(d22, d23), "fmov d22, d23");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp1) {
+ SET_UP();
+
+ COMPARE(fabs(s0, s1), "fabs s0, s1");
+ COMPARE(fabs(s31, s30), "fabs s31, s30");
+ COMPARE(fabs(d2, d3), "fabs d2, d3");
+ COMPARE(fabs(d31, d30), "fabs d31, d30");
+ COMPARE(fneg(s4, s5), "fneg s4, s5");
+ COMPARE(fneg(s31, s30), "fneg s31, s30");
+ COMPARE(fneg(d6, d7), "fneg d6, d7");
+ COMPARE(fneg(d31, d30), "fneg d31, d30");
+ COMPARE(fsqrt(s8, s9), "fsqrt s8, s9");
+ COMPARE(fsqrt(s31, s30), "fsqrt s31, s30");
+ COMPARE(fsqrt(d10, d11), "fsqrt d10, d11");
+ COMPARE(fsqrt(d31, d30), "fsqrt d31, d30");
+ COMPARE(frinta(s10, s11), "frinta s10, s11");
+ COMPARE(frinta(s31, s30), "frinta s31, s30");
+ COMPARE(frinta(d12, d13), "frinta d12, d13");
+ COMPARE(frinta(d31, d30), "frinta d31, d30");
+ COMPARE(frintn(s10, s11), "frintn s10, s11");
+ COMPARE(frintn(s31, s30), "frintn s31, s30");
+ COMPARE(frintn(d12, d13), "frintn d12, d13");
+ COMPARE(frintn(d31, d30), "frintn d31, d30");
+ COMPARE(frintz(s10, s11), "frintz s10, s11");
+ COMPARE(frintz(s31, s30), "frintz s31, s30");
+ COMPARE(frintz(d12, d13), "frintz d12, d13");
+ COMPARE(frintz(d31, d30), "frintz d31, d30");
+ COMPARE(fcvt(d14, s15), "fcvt d14, s15");
+ COMPARE(fcvt(d31, s31), "fcvt d31, s31");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_dp2) {
+ SET_UP();
+
+ COMPARE(fadd(s0, s1, s2), "fadd s0, s1, s2");
+ COMPARE(fadd(d3, d4, d5), "fadd d3, d4, d5");
+ COMPARE(fsub(s31, s30, s29), "fsub s31, s30, s29");
+ COMPARE(fsub(d31, d30, d29), "fsub d31, d30, d29");
+ COMPARE(fmul(s7, s8, s9), "fmul s7, s8, s9");
+ COMPARE(fmul(d10, d11, d12), "fmul d10, d11, d12");
+ COMPARE(fdiv(s13, s14, s15), "fdiv s13, s14, s15");
+ COMPARE(fdiv(d16, d17, d18), "fdiv d16, d17, d18");
+ COMPARE(fmax(s19, s20, s21), "fmax s19, s20, s21");
+ COMPARE(fmax(d22, d23, d24), "fmax d22, d23, d24");
+ COMPARE(fmin(s25, s26, s27), "fmin s25, s26, s27");
+ COMPARE(fmin(d28, d29, d30), "fmin d28, d29, d30");
+ COMPARE(fmaxnm(s31, s0, s1), "fmaxnm s31, s0, s1");
+ COMPARE(fmaxnm(d2, d3, d4), "fmaxnm d2, d3, d4");
+ COMPARE(fminnm(s5, s6, s7), "fminnm s5, s6, s7");
+ COMPARE(fminnm(d8, d9, d10), "fminnm d8, d9, d10");
+
+ CLEANUP();
+}
+
+
+TEST(fp_dp3) {
+ SET_UP();
+
+ COMPARE(fmadd(s7, s8, s9, s10), "fmadd s7, s8, s9, s10");
+ COMPARE(fmadd(d10, d11, d12, d10), "fmadd d10, d11, d12, d10");
+ COMPARE(fmsub(s7, s8, s9, s10), "fmsub s7, s8, s9, s10");
+ COMPARE(fmsub(d10, d11, d12, d10), "fmsub d10, d11, d12, d10");
+
+ COMPARE(fnmadd(s7, s8, s9, s10), "fnmadd s7, s8, s9, s10");
+ COMPARE(fnmadd(d10, d11, d12, d10), "fnmadd d10, d11, d12, d10");
+ COMPARE(fnmsub(s7, s8, s9, s10), "fnmsub s7, s8, s9, s10");
+ COMPARE(fnmsub(d10, d11, d12, d10), "fnmsub d10, d11, d12, d10");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_compare) {
+ SET_UP();
+
+ COMPARE(fcmp(s0, s1), "fcmp s0, s1");
+ COMPARE(fcmp(s31, s30), "fcmp s31, s30");
+ COMPARE(fcmp(d0, d1), "fcmp d0, d1");
+ COMPARE(fcmp(d31, d30), "fcmp d31, d30");
+ COMPARE(fcmp(s12, 0), "fcmp s12, #0.0");
+ COMPARE(fcmp(d12, 0), "fcmp d12, #0.0");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_cond_compare) {
+ SET_UP();
+
+ COMPARE(fccmp(s0, s1, NoFlag, eq), "fccmp s0, s1, #nzcv, eq");
+ COMPARE(fccmp(s2, s3, ZVFlag, ne), "fccmp s2, s3, #nZcV, ne");
+ COMPARE(fccmp(s30, s16, NCFlag, pl), "fccmp s30, s16, #NzCv, pl");
+ COMPARE(fccmp(s31, s31, NZCVFlag, le), "fccmp s31, s31, #NZCV, le");
+ COMPARE(fccmp(d4, d5, VFlag, gt), "fccmp d4, d5, #nzcV, gt");
+ COMPARE(fccmp(d6, d7, NFlag, vs), "fccmp d6, d7, #Nzcv, vs");
+ COMPARE(fccmp(d30, d0, NZFlag, vc), "fccmp d30, d0, #NZcv, vc");
+ COMPARE(fccmp(d31, d31, ZFlag, hs), "fccmp d31, d31, #nZcv, hs");
+ COMPARE(fccmp(s14, s15, CVFlag, al), "fccmp s14, s15, #nzCV, al");
+ COMPARE(fccmp(d16, d17, CFlag, nv), "fccmp d16, d17, #nzCv, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fp_select) {
+ SET_UP();
+
+ COMPARE(fcsel(s0, s1, s2, eq), "fcsel s0, s1, s2, eq");
+ COMPARE(fcsel(s31, s31, s30, ne), "fcsel s31, s31, s30, ne");
+ COMPARE(fcsel(d0, d1, d2, mi), "fcsel d0, d1, d2, mi");
+ COMPARE(fcsel(d31, d30, d31, pl), "fcsel d31, d30, d31, pl");
+ COMPARE(fcsel(s14, s15, s16, al), "fcsel s14, s15, s16, al");
+ COMPARE(fcsel(d17, d18, d19, nv), "fcsel d17, d18, d19, nv");
+
+ CLEANUP();
+}
+
+
+TEST_(fcvt_scvtf_ucvtf) {
+ SET_UP();
+
+ COMPARE(fcvtas(w0, s1), "fcvtas w0, s1");
+ COMPARE(fcvtas(x2, s3), "fcvtas x2, s3");
+ COMPARE(fcvtas(w4, d5), "fcvtas w4, d5");
+ COMPARE(fcvtas(x6, d7), "fcvtas x6, d7");
+ COMPARE(fcvtau(w8, s9), "fcvtau w8, s9");
+ COMPARE(fcvtau(x10, s11), "fcvtau x10, s11");
+ COMPARE(fcvtau(w12, d13), "fcvtau w12, d13");
+ COMPARE(fcvtau(x14, d15), "fcvtau x14, d15");
+ COMPARE(fcvtns(w0, s1), "fcvtns w0, s1");
+ COMPARE(fcvtns(x2, s3), "fcvtns x2, s3");
+ COMPARE(fcvtns(w4, d5), "fcvtns w4, d5");
+ COMPARE(fcvtns(x6, d7), "fcvtns x6, d7");
+ COMPARE(fcvtnu(w8, s9), "fcvtnu w8, s9");
+ COMPARE(fcvtnu(x10, s11), "fcvtnu x10, s11");
+ COMPARE(fcvtnu(w12, d13), "fcvtnu w12, d13");
+ COMPARE(fcvtnu(x14, d15), "fcvtnu x14, d15");
+ COMPARE(fcvtzu(x16, d17), "fcvtzu x16, d17");
+ COMPARE(fcvtzu(w18, d19), "fcvtzu w18, d19");
+ COMPARE(fcvtzs(x20, d21), "fcvtzs x20, d21");
+ COMPARE(fcvtzs(w22, d23), "fcvtzs w22, d23");
+ COMPARE(fcvtzu(x16, s17), "fcvtzu x16, s17");
+ COMPARE(fcvtzu(w18, s19), "fcvtzu w18, s19");
+ COMPARE(fcvtzs(x20, s21), "fcvtzs x20, s21");
+ COMPARE(fcvtzs(w22, s23), "fcvtzs w22, s23");
+ COMPARE(scvtf(d24, w25), "scvtf d24, w25");
+ COMPARE(scvtf(s24, w25), "scvtf s24, w25");
+ COMPARE(scvtf(d26, x0), "scvtf d26, x0");
+ COMPARE(scvtf(s26, x0), "scvtf s26, x0");
+ COMPARE(ucvtf(d28, w29), "ucvtf d28, w29");
+ COMPARE(ucvtf(s28, w29), "ucvtf s28, w29");
+ COMPARE(ucvtf(d0, x1), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1), "ucvtf s0, x1");
+ COMPARE(ucvtf(d0, x1, 0), "ucvtf d0, x1");
+ COMPARE(ucvtf(s0, x1, 0), "ucvtf s0, x1");
+ COMPARE(scvtf(d1, x2, 1), "scvtf d1, x2, #1");
+ COMPARE(scvtf(s1, x2, 1), "scvtf s1, x2, #1");
+ COMPARE(scvtf(d3, x4, 15), "scvtf d3, x4, #15");
+ COMPARE(scvtf(s3, x4, 15), "scvtf s3, x4, #15");
+ COMPARE(scvtf(d5, x6, 32), "scvtf d5, x6, #32");
+ COMPARE(scvtf(s5, x6, 32), "scvtf s5, x6, #32");
+ COMPARE(ucvtf(d7, x8, 2), "ucvtf d7, x8, #2");
+ COMPARE(ucvtf(s7, x8, 2), "ucvtf s7, x8, #2");
+ COMPARE(ucvtf(d9, x10, 16), "ucvtf d9, x10, #16");
+ COMPARE(ucvtf(s9, x10, 16), "ucvtf s9, x10, #16");
+ COMPARE(ucvtf(d11, x12, 33), "ucvtf d11, x12, #33");
+ COMPARE(ucvtf(s11, x12, 33), "ucvtf s11, x12, #33");
+ COMPARE(fcvtms(w0, s1), "fcvtms w0, s1");
+ COMPARE(fcvtms(x2, s3), "fcvtms x2, s3");
+ COMPARE(fcvtms(w4, d5), "fcvtms w4, d5");
+ COMPARE(fcvtms(x6, d7), "fcvtms x6, d7");
+ COMPARE(fcvtmu(w8, s9), "fcvtmu w8, s9");
+ COMPARE(fcvtmu(x10, s11), "fcvtmu x10, s11");
+ COMPARE(fcvtmu(w12, d13), "fcvtmu w12, d13");
+ COMPARE(fcvtmu(x14, d15), "fcvtmu x14, d15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_mrs) {
+ SET_UP();
+
+ COMPARE(mrs(x0, NZCV), "mrs x0, nzcv");
+ COMPARE(mrs(lr, NZCV), "mrs lr, nzcv");
+ COMPARE(mrs(x15, FPCR), "mrs x15, fpcr");
+
+ CLEANUP();
+}
+
+
+TEST_(system_msr) {
+ SET_UP();
+
+ COMPARE(msr(NZCV, x0), "msr nzcv, x0");
+ COMPARE(msr(NZCV, x30), "msr nzcv, lr");
+ COMPARE(msr(FPCR, x15), "msr fpcr, x15");
+
+ CLEANUP();
+}
+
+
+TEST_(system_nop) {
+ SET_UP();
+
+ COMPARE(nop(), "nop");
+
+ CLEANUP();
+}
+
+
+TEST_(debug) {
+ SET_UP();
+
+ ASSERT(kImmExceptionIsDebug == 0xdeb0);
+
+ // All debug codes should produce the same instruction, and the debug code
+ // can be any uint32_t.
+ COMPARE(debug("message", 0, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 1, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x10000, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x7fffffff, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0x80000000u, NO_PARAM), "hlt #0xdeb0");
+ COMPARE(debug("message", 0xffffffffu, NO_PARAM), "hlt #0xdeb0");
+
+ CLEANUP();
+}
+
+
+TEST_(hlt) {
+ SET_UP();
+
+ COMPARE(hlt(0), "hlt #0x0");
+ COMPARE(hlt(1), "hlt #0x1");
+ COMPARE(hlt(65535), "hlt #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(brk) {
+ SET_UP();
+
+ COMPARE(brk(0), "brk #0x0");
+ COMPARE(brk(1), "brk #0x1");
+ COMPARE(brk(65535), "brk #0xffff");
+
+ CLEANUP();
+}
+
+
+TEST_(add_sub_negative) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
+ COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
+ COMPARE(Add(x12, x2, -0x88), "sub x12, x2, #0x88 (136)");
+
+ COMPARE(Sub(x13, x0, -600), "add x13, x0, #0x258 (600)");
+ COMPARE(Sub(x14, x1, -313), "add x14, x1, #0x139 (313)");
+ COMPARE(Sub(x15, x2, -0x555), "add x15, x2, #0x555 (1365)");
+
+ COMPARE(Add(w19, w3, -0x344), "sub w19, w3, #0x344 (836)");
+ COMPARE(Add(w20, w4, -2000), "sub w20, w4, #0x7d0 (2000)");
+
+ COMPARE(Sub(w21, w3, -0xbc), "add w21, w3, #0xbc (188)");
+ COMPARE(Sub(w22, w4, -2000), "add w22, w4, #0x7d0 (2000)");
+
+ COMPARE(Cmp(w0, -1), "cmn w0, #0x1 (1)");
+ COMPARE(Cmp(x1, -1), "cmn x1, #0x1 (1)");
+ COMPARE(Cmp(w2, -4095), "cmn w2, #0xfff (4095)");
+ COMPARE(Cmp(x3, -4095), "cmn x3, #0xfff (4095)");
+
+ COMPARE(Cmn(w0, -1), "cmp w0, #0x1 (1)");
+ COMPARE(Cmn(x1, -1), "cmp x1, #0x1 (1)");
+ COMPARE(Cmn(w2, -4095), "cmp w2, #0xfff (4095)");
+ COMPARE(Cmn(x3, -4095), "cmp x3, #0xfff (4095)");
+
+ CLEANUP();
+}
+
+
+TEST_(logical_immediate_move) {
+ SET_UP_CLASS(MacroAssembler);
+
+ COMPARE(And(w0, w1, 0), "movz w0, #0x0");
+ COMPARE(And(x0, x1, 0), "movz x0, #0x0");
+ COMPARE(Orr(w2, w3, 0), "mov w2, w3");
+ COMPARE(Orr(x2, x3, 0), "mov x2, x3");
+ COMPARE(Eor(w4, w5, 0), "mov w4, w5");
+ COMPARE(Eor(x4, x5, 0), "mov x4, x5");
+ COMPARE(Bic(w6, w7, 0), "mov w6, w7");
+ COMPARE(Bic(x6, x7, 0), "mov x6, x7");
+ COMPARE(Orn(w8, w9, 0), "movn w8, #0x0");
+ COMPARE(Orn(x8, x9, 0), "movn x8, #0x0");
+ COMPARE(Eon(w10, w11, 0), "mvn w10, w11");
+ COMPARE(Eon(x10, x11, 0), "mvn x10, x11");
+
+ COMPARE(And(w12, w13, 0xffffffff), "mov w12, w13");
+ COMPARE(And(x12, x13, 0xffffffff), "and x12, x13, #0xffffffff");
+ COMPARE(And(x12, x13, 0xffffffffffffffff), "mov x12, x13");
+ COMPARE(Orr(w14, w15, 0xffffffff), "movn w14, #0x0");
+ COMPARE(Orr(x14, x15, 0xffffffff), "orr x14, x15, #0xffffffff");
+ COMPARE(Orr(x14, x15, 0xffffffffffffffff), "movn x14, #0x0");
+ COMPARE(Eor(w16, w17, 0xffffffff), "mvn w16, w17");
+ COMPARE(Eor(x16, x17, 0xffffffff), "eor x16, x17, #0xffffffff");
+ COMPARE(Eor(x16, x17, 0xffffffffffffffff), "mvn x16, x17");
+ COMPARE(Bic(w18, w19, 0xffffffff), "movz w18, #0x0");
+ COMPARE(Bic(x18, x19, 0xffffffff), "and x18, x19, #0xffffffff00000000");
+ COMPARE(Bic(x18, x19, 0xffffffffffffffff), "movz x18, #0x0");
+ COMPARE(Orn(w20, w21, 0xffffffff), "mov w20, w21");
+ COMPARE(Orn(x20, x21, 0xffffffff), "orr x20, x21, #0xffffffff00000000");
+ COMPARE(Orn(x20, x21, 0xffffffffffffffff), "mov x20, x21");
+ COMPARE(Eon(w22, w23, 0xffffffff), "mov w22, w23");
+ COMPARE(Eon(x22, x23, 0xffffffff), "eor x22, x23, #0xffffffff00000000");
+ COMPARE(Eon(x22, x23, 0xffffffffffffffff), "mov x22, x23");
+
+ CLEANUP();
+}
+
+
+TEST_(barriers) {
+ SET_UP_CLASS(MacroAssembler);
+
+ // DMB
+ COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
+ COMPARE(Dmb(FullSystem, BarrierReads), "dmb ld");
+ COMPARE(Dmb(FullSystem, BarrierWrites), "dmb st");
+
+ COMPARE(Dmb(InnerShareable, BarrierAll), "dmb ish");
+ COMPARE(Dmb(InnerShareable, BarrierReads), "dmb ishld");
+ COMPARE(Dmb(InnerShareable, BarrierWrites), "dmb ishst");
+
+ COMPARE(Dmb(NonShareable, BarrierAll), "dmb nsh");
+ COMPARE(Dmb(NonShareable, BarrierReads), "dmb nshld");
+ COMPARE(Dmb(NonShareable, BarrierWrites), "dmb nshst");
+
+ COMPARE(Dmb(OuterShareable, BarrierAll), "dmb osh");
+ COMPARE(Dmb(OuterShareable, BarrierReads), "dmb oshld");
+ COMPARE(Dmb(OuterShareable, BarrierWrites), "dmb oshst");
+
+ COMPARE(Dmb(FullSystem, BarrierOther), "dmb sy (0b1100)");
+ COMPARE(Dmb(InnerShareable, BarrierOther), "dmb sy (0b1000)");
+ COMPARE(Dmb(NonShareable, BarrierOther), "dmb sy (0b0100)");
+ COMPARE(Dmb(OuterShareable, BarrierOther), "dmb sy (0b0000)");
+
+ // DSB
+ COMPARE(Dsb(FullSystem, BarrierAll), "dsb sy");
+ COMPARE(Dsb(FullSystem, BarrierReads), "dsb ld");
+ COMPARE(Dsb(FullSystem, BarrierWrites), "dsb st");
+
+ COMPARE(Dsb(InnerShareable, BarrierAll), "dsb ish");
+ COMPARE(Dsb(InnerShareable, BarrierReads), "dsb ishld");
+ COMPARE(Dsb(InnerShareable, BarrierWrites), "dsb ishst");
+
+ COMPARE(Dsb(NonShareable, BarrierAll), "dsb nsh");
+ COMPARE(Dsb(NonShareable, BarrierReads), "dsb nshld");
+ COMPARE(Dsb(NonShareable, BarrierWrites), "dsb nshst");
+
+ COMPARE(Dsb(OuterShareable, BarrierAll), "dsb osh");
+ COMPARE(Dsb(OuterShareable, BarrierReads), "dsb oshld");
+ COMPARE(Dsb(OuterShareable, BarrierWrites), "dsb oshst");
+
+ COMPARE(Dsb(FullSystem, BarrierOther), "dsb sy (0b1100)");
+ COMPARE(Dsb(InnerShareable, BarrierOther), "dsb sy (0b1000)");
+ COMPARE(Dsb(NonShareable, BarrierOther), "dsb sy (0b0100)");
+ COMPARE(Dsb(OuterShareable, BarrierOther), "dsb sy (0b0000)");
+
+ // ISB
+ COMPARE(Isb(), "isb");
+
+ CLEANUP();
+}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index f32a69c4a..7ca95f6c9 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
#include "v8.h"
-#include "stub-cache.h"
#include "debug.h"
#include "disasm.h"
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
+#include "stub-cache.h"
#include "cctest.h"
using namespace v8::internal;
@@ -49,7 +49,7 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmIa320) {
CcTest::InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -74,12 +74,23 @@ TEST(DisasmIa320) {
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(edx, Operand(ebx, 1999));
+ __ add(edx, Operand(ebx, -4));
+ __ add(edx, Operand(ebx, -1999));
__ add(edx, Operand(esp, 0));
__ add(edx, Operand(esp, 16));
__ add(edx, Operand(esp, 1999));
+ __ add(edx, Operand(esp, -4));
+ __ add(edx, Operand(esp, -1999));
+ __ nop();
+ __ add(esi, Operand(ecx, times_4, 0));
+ __ add(esi, Operand(ecx, times_4, 24));
+ __ add(esi, Operand(ecx, times_4, -4));
+ __ add(esi, Operand(ecx, times_4, -1999));
__ nop();
__ add(edi, Operand(ebp, ecx, times_4, 0));
__ add(edi, Operand(ebp, ecx, times_4, 12));
+ __ add(edi, Operand(ebp, ecx, times_4, -8));
+ __ add(edi, Operand(ebp, ecx, times_4, -3999));
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
__ nop();
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 9fce25fae..5ca12b943 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -34,6 +34,7 @@
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
+#include "stub-cache.h"
#include "cctest.h"
using namespace v8::internal;
@@ -48,17 +49,18 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmX64) {
CcTest::InitializeVM();
- v8::HandleScope scope;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- Assembler assm(CcTest::i_isolate(), buffer, sizeof buffer);
+ Assembler assm(isolate, buffer, sizeof buffer);
DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
// Short immediate instructions
__ addq(rax, Immediate(12345678));
- __ or_(rax, Immediate(12345678));
+ __ orq(rax, Immediate(12345678));
__ subq(rax, Immediate(12345678));
- __ xor_(rax, Immediate(12345678));
- __ and_(rax, Immediate(12345678));
+ __ xorq(rax, Immediate(12345678));
+ __ andq(rax, Immediate(12345678));
// ---- This one caused crash
__ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
@@ -68,27 +70,38 @@ TEST(DisasmX64) {
__ addq(rdx, Operand(rbx, 0));
__ addq(rdx, Operand(rbx, 16));
__ addq(rdx, Operand(rbx, 1999));
+ __ addq(rdx, Operand(rbx, -4));
+ __ addq(rdx, Operand(rbx, -1999));
__ addq(rdx, Operand(rsp, 0));
__ addq(rdx, Operand(rsp, 16));
__ addq(rdx, Operand(rsp, 1999));
+ __ addq(rdx, Operand(rsp, -4));
+ __ addq(rdx, Operand(rsp, -1999));
+ __ nop();
+ __ addq(rsi, Operand(rcx, times_4, 0));
+ __ addq(rsi, Operand(rcx, times_4, 24));
+ __ addq(rsi, Operand(rcx, times_4, -4));
+ __ addq(rsi, Operand(rcx, times_4, -1999));
__ nop();
__ addq(rdi, Operand(rbp, rcx, times_4, 0));
__ addq(rdi, Operand(rbp, rcx, times_4, 12));
+ __ addq(rdi, Operand(rbp, rcx, times_4, -8));
+ __ addq(rdi, Operand(rbp, rcx, times_4, -3999));
__ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
__ nop();
__ addq(rbx, Immediate(12));
__ nop();
__ nop();
- __ and_(rdx, Immediate(3));
- __ and_(rdx, Operand(rsp, 4));
+ __ andq(rdx, Immediate(3));
+ __ andq(rdx, Operand(rsp, 4));
__ cmpq(rdx, Immediate(3));
__ cmpq(rdx, Operand(rsp, 4));
__ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
__ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
__ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
- __ or_(rdx, Immediate(3));
- __ xor_(rdx, Immediate(3));
+ __ orq(rdx, Immediate(3));
+ __ xorq(rdx, Immediate(3));
__ nop();
__ cpuid();
__ movsxbq(rdx, Operand(rcx, 0));
@@ -99,23 +112,23 @@ TEST(DisasmX64) {
__ movzxwq(rdx, Operand(rcx, 0));
__ nop();
- __ imul(rdx, rcx);
+ __ imulq(rdx, rcx);
__ shld(rdx, rcx);
__ shrd(rdx, rcx);
__ bts(Operand(rdx, 0), rcx);
__ bts(Operand(rbx, rcx, times_4, 0), rcx);
__ nop();
- __ push(Immediate(12));
- __ push(Immediate(23456));
- __ push(rcx);
- __ push(rsi);
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(Operand(rbx, rcx, times_4, 0));
- __ push(Operand(rbx, rcx, times_4, 0));
- __ push(Operand(rbx, rcx, times_4, 10000));
- __ pop(rdx);
- __ pop(rax);
- __ pop(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Immediate(12));
+ __ pushq(Immediate(23456));
+ __ pushq(rcx);
+ __ pushq(rsi);
+ __ pushq(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ pushq(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Operand(rbx, rcx, times_4, 0));
+ __ pushq(Operand(rbx, rcx, times_4, 10000));
+ __ popq(rdx);
+ __ popq(rax);
+ __ popq(Operand(rbx, rcx, times_4, 0));
__ nop();
__ addq(rdx, Operand(rsp, 16));
@@ -145,23 +158,24 @@ TEST(DisasmX64) {
__ nop();
__ idivq(rdx);
__ mul(rdx);
- __ neg(rdx);
- __ not_(rdx);
+ __ negq(rdx);
+ __ notq(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
- __ imul(rdx, Operand(rbx, rcx, times_4, 10000));
- __ imul(rdx, rcx, Immediate(12));
- __ imul(rdx, rcx, Immediate(1000));
+ __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ imulq(rdx, rcx, Immediate(12));
+ __ imulq(rdx, rcx, Immediate(1000));
__ incq(rdx);
__ incq(Operand(rbx, rcx, times_4, 10000));
- __ push(Operand(rbx, rcx, times_4, 10000));
- __ pop(Operand(rbx, rcx, times_4, 10000));
- __ jmp(Operand(rbx, rcx, times_4, 10000));
+ __ pushq(Operand(rbx, rcx, times_4, 10000));
+ __ popq(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ jmp(Operand(rbx, rcx, times_4, 10000));
- __ lea(rdx, Operand(rbx, rcx, times_4, 10000));
- __ or_(rdx, Immediate(12345));
- __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ orq(rdx, Immediate(12345));
+ __ orq(rdx, Operand(rbx, rcx, times_4, 10000));
__ nop();
@@ -188,22 +202,22 @@ TEST(DisasmX64) {
__ addq(rbx, Immediate(12));
__ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ and_(rbx, Immediate(12345));
+ __ andq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12));
__ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ cmpb(rax, Immediate(100));
- __ or_(rbx, Immediate(12345));
+ __ orq(rbx, Immediate(12345));
__ subq(rbx, Immediate(12));
__ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ xor_(rbx, Immediate(12345));
+ __ xorq(rbx, Immediate(12345));
- __ imul(rdx, rcx, Immediate(12));
- __ imul(rdx, rcx, Immediate(1000));
+ __ imulq(rdx, rcx, Immediate(12));
+ __ imulq(rdx, rcx, Immediate(1000));
__ cld();
@@ -216,8 +230,8 @@ TEST(DisasmX64) {
__ testb(Operand(rax, -20), Immediate(0x9A));
__ nop();
- __ xor_(rdx, Immediate(12345));
- __ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
+ __ xorq(rdx, Immediate(12345));
+ __ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
__ bts(Operand(rbx, rcx, times_8, 10000), rdx);
__ hlt();
__ int3();
@@ -233,20 +247,20 @@ TEST(DisasmX64) {
__ call(&L2);
__ nop();
__ bind(&L2);
- __ call(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ call(Operand(rbx, rcx, times_4, 10000));
__ nop();
- Handle<Code> ic(CcTest::i_isolate()->builtins()->builtin(
- Builtins::kLoadIC_Initialize));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ nop();
__ jmp(&L1);
- __ jmp(Operand(rbx, rcx, times_4, 10000));
+ // TODO(mstarzinger): The following is protected.
+ // __ jmp(Operand(rbx, rcx, times_4, 10000));
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(),
- assm.isolate());
+ ExternalReference(Debug_Address::AfterBreakTarget(), isolate);
USE(after_break_target);
#endif // ENABLE_DEBUGGER_SUPPORT
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -345,9 +359,9 @@ TEST(DisasmX64) {
__ andps(xmm0, xmm1);
__ andps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ orps(xmm0, xmm1);
- __ ordps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ orps(xmm0, Operand(rbx, rcx, times_4, 10000));
__ xorps(xmm0, xmm1);
- __ xordps(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ xorps(xmm0, Operand(rbx, rcx, times_4, 10000));
// Arithmetic operation
__ addps(xmm1, xmm0);
@@ -355,7 +369,7 @@ TEST(DisasmX64) {
__ subps(xmm1, xmm0);
__ subps(xmm1, Operand(rbx, rcx, times_4, 10000));
__ mulps(xmm1, xmm0);
- __ mulps(xmm1, Operand(rbx, ecx, times_4, 10000));
+ __ mulps(xmm1, Operand(rbx, rcx, times_4, 10000));
__ divps(xmm1, xmm0);
__ divps(xmm1, Operand(rbx, rcx, times_4, 10000));
}
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
new file mode 100644
index 000000000..0ceb60f7b
--- /dev/null
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include "cctest.h"
+
+#include "arm64/decoder-arm64.h"
+#include "arm64/decoder-arm64-inl.h"
+#include "arm64/disasm-arm64.h"
+
+using namespace v8::internal;
+
+TEST(FUZZ_decoder) {
+ // Feed noise into the decoder to check that it doesn't crash.
+ // 43 million = ~1% of the instruction space.
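+ // (The instruction space is 2^32 encodings; 43 * 1024 * 1024 is about
+ // 45 million, i.e. roughly 1%.)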
+ static const int instruction_count = 43 * 1024 * 1024;
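+  // (There are 2^32 possible 32-bit encodings, so 43 * 1024 * 1024 is ~1%.)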
+
+ uint16_t seed[3] = {1, 2, 3};
+ seed48(seed);
+
+ Decoder<DispatchingDecoderVisitor> decoder;
+ Instruction buffer[kInstructionSize];
+
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
+
+
+TEST(FUZZ_disasm) {
+ // Feed noise into the disassembler to check that it doesn't crash.
+ // 9 million = ~0.2% of the instruction space.
+ static const int instruction_count = 9 * 1024 * 1024;
+
+ uint16_t seed[3] = {42, 43, 44};
+ seed48(seed);
+
+ Decoder<DispatchingDecoderVisitor> decoder;
+ Disassembler disasm;
+ Instruction buffer[kInstructionSize];
+
+ decoder.AppendVisitor(&disasm);
+ for (int i = 0; i < instruction_count; i++) {
+ uint32_t instr = mrand48();
+ buffer->SetInstructionBits(instr);
+ decoder.Decode(buffer);
+ }
+}
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 3ec844e9c..66ee04158 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -66,21 +66,21 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ pop(ebx);
__ Ret();
#elif V8_TARGET_ARCH_X64
- __ push(kRootRegister);
+ __ pushq(kRootRegister);
__ InitializeRootRegister();
- __ push(rbx);
- __ push(rcx);
- __ movq(rax, Immediate(0));
- __ movq(rbx, Immediate(string.at(0)));
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ movp(rax, Immediate(0));
+ __ movp(rbx, Immediate(string.at(0)));
StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
for (int i = 1; i < string.length(); i++) {
- __ movq(rbx, Immediate(string.at(i)));
+ __ movp(rbx, Immediate(string.at(i)));
StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
}
StringHelper::GenerateHashGetHash(masm, rax, rcx);
- __ pop(rcx);
- __ pop(rbx);
- __ pop(kRootRegister);
+ __ popq(rcx);
+ __ popq(rbx);
+ __ popq(kRootRegister);
__ Ret();
#elif V8_TARGET_ARCH_ARM
__ push(kRootRegister);
@@ -96,6 +96,24 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
StringHelper::GenerateHashGetHash(masm, r0);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
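+  // csp must remain 16-byte aligned, so the root register is paired with xzr.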
+ __ InitializeRootRegister();
+ __ Mov(x0, 0);
+ __ Mov(x10, Operand(string.at(0)));
+ StringHelper::GenerateHashInit(masm, x0, x10);
+ for (int i = 1; i < string.length(); i++) {
+ __ Mov(x10, Operand(string.at(i)));
+ StringHelper::GenerateHashAddCharacter(masm, x0, x10);
+ }
+ StringHelper::GenerateHashGetHash(masm, x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -111,6 +129,8 @@ void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
@@ -123,13 +143,13 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(ebx);
__ Ret();
#elif V8_TARGET_ARCH_X64
- __ push(kRootRegister);
+ __ pushq(kRootRegister);
__ InitializeRootRegister();
- __ push(rbx);
- __ movq(rax, Immediate(key));
+ __ pushq(rbx);
+ __ movp(rax, Immediate(key));
__ GetNumberHash(rax, rbx);
- __ pop(rbx);
- __ pop(kRootRegister);
+ __ popq(rbx);
+ __ popq(kRootRegister);
__ Ret();
#elif V8_TARGET_ARCH_ARM
__ push(kRootRegister);
@@ -138,6 +158,18 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ GetNumberHash(r0, ip);
__ pop(kRootRegister);
__ mov(pc, Operand(lr));
+#elif V8_TARGET_ARCH_ARM64
+ // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
+ // csp is initialized by the calling (C++) code.
+ Register old_stack_pointer = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Push(root, xzr);
+ __ InitializeRootRegister();
+ __ Mov(x0, key);
+ __ GetNumberHash(x0, x10);
+ __ Pop(xzr, root);
+ __ Ret();
+ __ SetStackPointer(old_stack_pointer);
#elif V8_TARGET_ARCH_MIPS
__ push(kRootRegister);
__ InitializeRootRegister();
@@ -146,6 +178,8 @@ void generate(MacroAssembler* masm, uint32_t key) {
__ pop(kRootRegister);
__ jr(ra);
__ nop();
+#else
+#error Unsupported architecture.
#endif
}
@@ -172,8 +206,8 @@ void check(i::Vector<const uint8_t> string) {
Handle<String> v8_string = factory->NewStringFromOneByte(string);
v8_string->set_hash_field(String::kEmptyHashField);
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
@@ -207,8 +241,8 @@ void check(uint32_t key) {
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
#ifdef USE_SIMULATOR
- uint32_t codegen_hash =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0));
+ uint32_t codegen_hash = static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 1caa515a9..f1ccc571d 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -234,9 +234,9 @@ TEST(HeapSnapshotObjectSizes) {
CHECK_NE(NULL, x2);
// Test sizes.
- CHECK_NE(0, x->GetSelfSize());
- CHECK_NE(0, x1->GetSelfSize());
- CHECK_NE(0, x2->GetSelfSize());
+ CHECK_NE(0, static_cast<int>(x->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x1->GetShallowSize()));
+ CHECK_NE(0, static_cast<int>(x2->GetShallowSize()));
}
@@ -2067,7 +2067,8 @@ TEST(AllocationSitesAreVisible) {
"elements");
CHECK_NE(NULL, elements);
CHECK_EQ(v8::HeapGraphNode::kArray, elements->GetType());
- CHECK_EQ(v8::internal::FixedArray::SizeFor(3), elements->GetSelfSize());
+ CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
+ static_cast<int>(elements->GetShallowSize()));
v8::Handle<v8::Value> array_val =
heap_profiler->FindObjectById(transition_info->GetId());
@@ -2215,8 +2216,9 @@ static AllocationTraceNode* FindNode(
Vector<AllocationTraceNode*> children = node->children();
node = NULL;
for (int j = 0; j < children.length(); j++) {
- v8::SnapshotObjectId id = children[j]->function_id();
- AllocationTracker::FunctionInfo* info = tracker->GetFunctionInfo(id);
+ unsigned index = children[j]->function_info_index();
+ AllocationTracker::FunctionInfo* info =
+ tracker->function_info_list()[index];
if (info && strcmp(info->name, name) == 0) {
node = children[j];
break;
@@ -2363,6 +2365,34 @@ TEST(TrackBumpPointerAllocations) {
}
+TEST(TrackV8ApiAllocation) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+ const char* names[] = { "(V8 API)" };
+ heap_profiler->StartTrackingHeapObjects(true);
+
+ v8::Handle<v8::Object> o1 = v8::Object::New(env->GetIsolate());
+ o1->Clone();
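+  // Both Object::New and Clone allocate through the V8 API, so the tracker
+  // should record at least two allocations under the "(V8 API)" node.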
+
+ AllocationTracker* tracker =
+ reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
+ CHECK_NE(NULL, tracker);
+ // Resolve all function locations.
+ tracker->PrepareForSerialization();
+ // Print for better diagnostics in case of failure.
+ tracker->trace_tree()->Print(tracker);
+
+ AllocationTraceNode* node =
+ FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+ CHECK_NE(NULL, node);
+ CHECK_GE(node->allocation_count(), 2);
+ CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
+ heap_profiler->StopTrackingHeapObjects();
+}
+
+
TEST(ArrayBufferAndArrayBufferView) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -2381,6 +2411,71 @@ TEST(ArrayBufferAndArrayBufferView) {
const v8::HeapGraphNode* first_view =
GetProperty(arr1_buffer, v8::HeapGraphEdge::kWeak, "weak_first_view");
CHECK_NE(NULL, first_view);
+ const v8::HeapGraphNode* backing_store =
+ GetProperty(arr1_buffer, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, backing_store);
+ CHECK_EQ(400, static_cast<int>(backing_store->GetShallowSize()));
+}
+
+
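+// Counts how many nodes in the snapshot have an edge pointing at |node|.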
+static int GetRetainersCount(const v8::HeapSnapshot* snapshot,
+ const v8::HeapGraphNode* node) {
+ int count = 0;
+ for (int i = 0, l = snapshot->GetNodesCount(); i < l; ++i) {
+ const v8::HeapGraphNode* parent = snapshot->GetNode(i);
+ for (int j = 0, l2 = parent->GetChildrenCount(); j < l2; ++j) {
+ if (parent->GetChild(j)->GetToNode() == node) {
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+
+TEST(ArrayBufferSharedBackingStore) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
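+  // One buffer is externalized and a second one is created over the same
+  // memory; both heap graph nodes should share a single backing_store node.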
+ v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+ v8::ArrayBuffer::Contents ab_contents = ab->Externalize();
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ void* data = ab_contents.Data();
+ ASSERT(data != NULL);
+ v8::Local<v8::ArrayBuffer> ab2 =
+ v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
+ CHECK(ab2->IsExternal());
+ env->Global()->Set(v8_str("ab1"), ab);
+ env->Global()->Set(v8_str("ab2"), ab2);
+
+ v8::Handle<v8::Value> result = CompileRun("ab2.byteLength");
+ CHECK_EQ(1024, result->Int32Value());
+
+ const v8::HeapSnapshot* snapshot =
+ heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* ab1_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab1");
+ CHECK_NE(NULL, ab1_node);
+ const v8::HeapGraphNode* ab1_data =
+ GetProperty(ab1_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab1_data);
+ const v8::HeapGraphNode* ab2_node =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "ab2");
+ CHECK_NE(NULL, ab2_node);
+ const v8::HeapGraphNode* ab2_data =
+ GetProperty(ab2_node, v8::HeapGraphEdge::kInternal, "backing_store");
+ CHECK_NE(NULL, ab2_data);
+ CHECK_EQ(ab1_data, ab2_data);
+ CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
+ free(data);
}
@@ -2411,3 +2506,63 @@ TEST(BoxObject) {
GetProperty(box_node, v8::HeapGraphEdge::kInternal, "value");
CHECK_NE(NULL, box_value);
}
+
+
+static inline i::Address ToAddress(int n) {
+ return reinterpret_cast<i::Address>(n);
+}
+
+
+TEST(AddressToTraceMap) {
+ i::AddressToTraceMap map;
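+  // AddRange(start, size, id) maps [start, start + size) to a trace node id;
+  // later ranges overwrite any overlapping parts of earlier ones.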
+
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(150)));
+
+ // [0x100, 0x200) -> 1
+ map.AddRange(ToAddress(0x100), 0x100, 1U);
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x50)));
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x100)));
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x150)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x100 + 0x100)));
+ CHECK_EQ(1, static_cast<int>(map.size()));
+
+ // [0x100, 0x200) -> 1, [0x200, 0x300) -> 2
+ map.AddRange(ToAddress(0x200), 0x100, 2U);
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x2a0)));
+ CHECK_EQ(2, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2
+ map.AddRange(ToAddress(0x180), 0x100, 3U);
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x280) -> 3, [0x280, 0x300) -> 2,
+ // [0x400, 0x500) -> 4
+ map.AddRange(ToAddress(0x400), 0x100, 4U);
+ CHECK_EQ(1, map.GetTraceNodeId(ToAddress(0x17F)));
+ CHECK_EQ(2, map.GetTraceNodeId(ToAddress(0x280)));
+ CHECK_EQ(3, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(4, map.GetTraceNodeId(ToAddress(0x450)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x500)));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x350)));
+ CHECK_EQ(4, static_cast<int>(map.size()));
+
+ // [0x100, 0x180) -> 1, [0x180, 0x200) -> 3, [0x200, 0x600) -> 5
+ map.AddRange(ToAddress(0x200), 0x400, 5U);
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x400)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+  // [0x100, 0x180) -> 1, [0x180, 0x200) -> 7, [0x200, 0x600) -> 5
+ map.AddRange(ToAddress(0x180), 0x80, 6U);
+ map.AddRange(ToAddress(0x180), 0x80, 7U);
+ CHECK_EQ(7, map.GetTraceNodeId(ToAddress(0x180)));
+ CHECK_EQ(5, map.GetTraceNodeId(ToAddress(0x200)));
+ CHECK_EQ(3, static_cast<int>(map.size()));
+
+ map.Clear();
+ CHECK_EQ(0, static_cast<int>(map.size()));
+ CHECK_EQ(0, map.GetTraceNodeId(ToAddress(0x400)));
+}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 3e8d93b3a..c1f20f1f0 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -148,6 +148,16 @@ static void CheckFindCodeObject(Isolate* isolate) {
}
+TEST(HandleNull) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope outer_scope(isolate);
+ LocalContext context;
+ Handle<Object> n(reinterpret_cast<Object*>(NULL), isolate);
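+  // The handle wraps a valid location that holds a NULL Object*, so it must
+  // not be considered a null handle.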
+ CHECK(!n.is_null());
+}
+
+
TEST(HeapObjects) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -180,7 +190,7 @@ TEST(HeapObjects) {
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
-#ifndef V8_TARGET_ARCH_X64
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
@@ -275,11 +285,11 @@ TEST(GarbageCollection) {
Handle<Map> initial_map =
factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- JSReceiver::SetProperty(global, name, function, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, name, function, NONE, SLOPPY);
// Allocate an object. Unrooted after leaving the scope.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, prop_namex, twenty_four, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, prop_namex, twenty_four, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
@@ -299,8 +309,8 @@ TEST(GarbageCollection) {
HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(global, obj_name, obj, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, obj_name, obj, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
}
// After gc, it should survive.
@@ -433,7 +443,7 @@ TEST(WeakGlobalHandlesScavenge) {
&TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -518,7 +528,7 @@ TEST(DeleteWeakGlobalHandle) {
&TestWeakGlobalHandleCallback);
   // Scavenge does not recognize weak references.
- heap->PerformScavenge();
+ heap->CollectGarbage(NEW_SPACE);
CHECK(!WeakPointerCleared);
@@ -635,11 +645,10 @@ TEST(FunctionAllocation) {
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = factory->NewJSObject(function);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check that we can add properties to function objects.
- JSReceiver::SetProperty(function, prop_name, twenty_four, NONE,
- kNonStrictMode);
+ JSReceiver::SetProperty(function, prop_name, twenty_four, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(24), function->GetProperty(*prop_name));
}
@@ -666,7 +675,7 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
// delete first
@@ -674,8 +683,8 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, first));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
CHECK(JSReceiver::HasLocalProperty(obj, second));
@@ -687,8 +696,8 @@ TEST(ObjectProperties) {
CHECK(!JSReceiver::HasLocalProperty(obj, second));
// add first and then second
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
CHECK(JSReceiver::HasLocalProperty(obj, first));
CHECK(JSReceiver::HasLocalProperty(obj, second));
@@ -702,14 +711,14 @@ TEST(ObjectProperties) {
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAscii(CStrVector(string1));
- JSReceiver::SetProperty(obj, s1, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, s1, one, NONE, SLOPPY);
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
CHECK(JSReceiver::HasLocalProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
- JSReceiver::SetProperty(obj, s2_string, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, s2_string, one, NONE, SLOPPY);
Handle<String> s2 = factory->NewStringFromAscii(CStrVector(string2));
CHECK(JSReceiver::HasLocalProperty(obj, s2));
}
@@ -733,7 +742,7 @@ TEST(JSObjectMaps) {
   // Set a property
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
- JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(23), obj->GetProperty(*prop_name));
// Check the map has changed
@@ -757,23 +766,23 @@ TEST(JSArray) {
Handle<JSObject> object = factory->NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
// We just initialized the VM, no heap allocation failure yet.
- array->Initialize(0)->ToObjectChecked();
+ JSArray::Initialize(array, 0);
// Set array length to 0.
- array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
+ *JSArray::SetElementsLength(array, handle(Smi::FromInt(0), isolate));
CHECK_EQ(Smi::FromInt(0), array->length());
// Must be in fast mode.
CHECK(array->HasFastSmiOrObjectElements());
// array[length] = name.
- JSReceiver::SetElement(array, 0, name, NONE, kNonStrictMode);
+ JSReceiver::SetElement(array, 0, name, NONE, SLOPPY);
CHECK_EQ(Smi::FromInt(1), array->length());
- CHECK_EQ(array->GetElement(isolate, 0), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, 0), *name);
// Set array length with larger than smi value.
Handle<Object> length =
factory->NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
- array->SetElementsLength(*length)->ToObjectChecked();
+ *JSArray::SetElementsLength(array, length);
uint32_t int_length = 0;
CHECK(length->ToArrayIndex(&int_length));
@@ -781,12 +790,12 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- JSReceiver::SetElement(array, int_length, name, NONE, kNonStrictMode);
+ JSReceiver::SetElement(array, int_length, name, NONE, SLOPPY);
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
- CHECK_EQ(array->GetElement(isolate, int_length), *name);
- CHECK_EQ(array->GetElement(isolate, 0), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, int_length), *name);
+ CHECK_EQ(*i::Object::GetElement(isolate, array, 0), *name);
}
@@ -808,31 +817,35 @@ TEST(JSObjectCopy) {
Handle<Smi> one(Smi::FromInt(1), isolate);
Handle<Smi> two(Smi::FromInt(2), isolate);
- JSReceiver::SetProperty(obj, first, one, NONE, kNonStrictMode);
- JSReceiver::SetProperty(obj, second, two, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY);
+ JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY);
- JSReceiver::SetElement(obj, 0, first, NONE, kNonStrictMode);
- JSReceiver::SetElement(obj, 1, second, NONE, kNonStrictMode);
+ JSReceiver::SetElement(obj, 0, first, NONE, SLOPPY);
+ JSReceiver::SetElement(obj, 1, second, NONE, SLOPPY);
// Make the clone.
Handle<JSObject> clone = JSObject::Copy(obj);
CHECK(!clone.is_identical_to(obj));
- CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 0));
- CHECK_EQ(obj->GetElement(isolate, 1), clone->GetElement(isolate, 1));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 0),
+ *i::Object::GetElement(isolate, clone, 0));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 1),
+ *i::Object::GetElement(isolate, clone, 1));
CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*first));
CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*second));
// Flip the values.
- JSReceiver::SetProperty(clone, first, two, NONE, kNonStrictMode);
- JSReceiver::SetProperty(clone, second, one, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(clone, first, two, NONE, SLOPPY);
+ JSReceiver::SetProperty(clone, second, one, NONE, SLOPPY);
- JSReceiver::SetElement(clone, 0, second, NONE, kNonStrictMode);
- JSReceiver::SetElement(clone, 1, first, NONE, kNonStrictMode);
+ JSReceiver::SetElement(clone, 0, second, NONE, SLOPPY);
+ JSReceiver::SetElement(clone, 1, first, NONE, SLOPPY);
- CHECK_EQ(obj->GetElement(isolate, 1), clone->GetElement(isolate, 0));
- CHECK_EQ(obj->GetElement(isolate, 0), clone->GetElement(isolate, 1));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 1),
+ *i::Object::GetElement(isolate, clone, 0));
+ CHECK_EQ(*i::Object::GetElement(isolate, obj, 0),
+ *i::Object::GetElement(isolate, clone, 1));
CHECK_EQ(obj->GetProperty(*second), clone->GetProperty(*first));
CHECK_EQ(obj->GetProperty(*first), clone->GetProperty(*second));
@@ -1022,7 +1035,7 @@ TEST(Regression39128) {
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
Address old_pointer_space_top = heap->old_pointer_space()->top();
- AlwaysAllocateScope aa_scope;
+ AlwaysAllocateScope aa_scope(isolate);
Object* clone_obj = heap->CopyJSObject(jsobject)->ToObjectChecked();
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_pointer_space_top) {
@@ -1436,7 +1449,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
}
@@ -1448,14 +1461,14 @@ TEST(TestInternalWeakLists) {
// Get rid of f3 and f5 in the same way.
CompileRun("f3=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
}
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
@@ -1477,7 +1490,7 @@ TEST(TestInternalWeakLists) {
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CHECK_EQ(kNumTestContexts - i, CountNativeContexts());
}
@@ -1596,7 +1609,7 @@ TEST(TestSizeOfObjects) {
{
// Allocate objects on several different old-space pages so that
// lazy sweeping kicks in for subsequent GC runs.
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
CcTest::heap()->AllocateFixedArray(8192, TENURED)->ToObjectChecked();
@@ -1663,7 +1676,7 @@ static void FillUpNewSpace(NewSpace* new_space) {
Isolate* isolate = heap->isolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
for (intptr_t i = 0; i < number_of_fillers; i++) {
@@ -2004,8 +2017,14 @@ TEST(PrototypeTransitionClearing) {
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
+ CompileRun("var base = {};");
+ Handle<JSObject> baseObject =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Object>::Cast(
+ CcTest::global()->Get(v8_str("base"))));
+ int initialTransitions = baseObject->map()->NumberOfProtoTransitions();
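+  // Remember how many proto transitions the map starts with so the checks
+  // below do not depend on its initial state.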
+
CompileRun(
- "var base = {};"
"var live = [];"
"for (var i = 0; i < 10; i++) {"
" var object = {};"
@@ -2014,32 +2033,29 @@ TEST(PrototypeTransitionClearing) {
" if (i >= 3) live.push(object, prototype);"
"}");
- Handle<JSObject> baseObject =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Object>::Cast(
- CcTest::global()->Get(v8_str("base"))));
-
// Verify that only dead prototype transitions are cleared.
- CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
+ CHECK_EQ(initialTransitions + 10,
+ baseObject->map()->NumberOfProtoTransitions());
CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
const int transitions = 10 - 3;
- CHECK_EQ(transitions, baseObject->map()->NumberOfProtoTransitions());
+ CHECK_EQ(initialTransitions + transitions,
+ baseObject->map()->NumberOfProtoTransitions());
// Verify that prototype transitions array was compacted.
FixedArray* trans = baseObject->map()->GetPrototypeTransitions();
- for (int i = 0; i < transitions; i++) {
+ for (int i = initialTransitions; i < initialTransitions + transitions; i++) {
int j = Map::kProtoTransitionHeaderSize +
i * Map::kProtoTransitionElementsPerEntry;
CHECK(trans->get(j + Map::kProtoTransitionMapOffset)->IsMap());
Object* proto = trans->get(j + Map::kProtoTransitionPrototypeOffset);
- CHECK(proto->IsTheHole() || proto->IsJSObject());
+ CHECK(proto->IsJSObject());
}
// Make sure next prototype is placed on an old-space evacuation candidate.
Handle<JSObject> prototype;
PagedSpace* space = CcTest::heap()->old_pointer_space();
{
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
SimulateFullSpace(space);
prototype = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
}
@@ -2167,7 +2183,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
v8::HandleScope scope(CcTest::isolate());
SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
" this.x = x;"
@@ -2210,10 +2226,10 @@ TEST(OptimizedPretenuringAllocationFolding) {
"var number_elements = 20000;"
"var elements = new Array();"
"function f() {"
- " for (var i = 0; i < 20000-1; i++) {"
+ " for (var i = 0; i < number_elements; i++) {"
" elements[i] = new DataObject();"
" }"
- " return new DataObject()"
+ " return elements[number_elements-1]"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
@@ -2512,6 +2528,44 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
}
+// Make sure pretenuring feedback is gathered for constructed objects as well
+// as for literals.
+TEST(OptimizedPretenuringConstructorCalls) {
+ if (!FLAG_allocation_site_pretenuring || !i::FLAG_pretenuring_call_new) {
+ // FLAG_pretenuring_call_new needs to be synced with the snapshot.
+ return;
+ }
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_max_new_space_size = 2048;
+ CcTest::InitializeVM();
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Local<v8::Value> res = CompileRun(
+ "var number_elements = 20000;"
+ "var elements = new Array(number_elements);"
+ "function foo() {"
+ " this.a = 3;"
+ " this.b = {};"
+ "}"
+ "function f() {"
+ " for (var i = 0; i < number_elements; i++) {"
+ " elements[i] = new foo();"
+ " }"
+ " return elements[number_elements - 1];"
+ "};"
+ "f(); f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ CHECK(CcTest::heap()->InOldPointerSpace(*o));
+}
+
+
// Test regular array literals allocation.
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
@@ -2539,6 +2593,7 @@ TEST(OptimizedAllocationArrayLiterals) {
}
+// Test global pretenuring via the call-new mechanism.
TEST(OptimizedPretenuringCallNew) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_allocation_site_pretenuring = false;
@@ -2549,7 +2604,7 @@ TEST(OptimizedPretenuringCallNew) {
v8::HandleScope scope(CcTest::isolate());
CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
v8::Local<v8::Value> res = CompileRun(
"function g() { this.a = 0; }"
"function f() {"
@@ -2581,7 +2636,7 @@ TEST(Regress1465) {
static const int transitions_count = 256;
{
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
@@ -2711,7 +2766,7 @@ TEST(ReleaseOverReservedPages) {
PagedSpace* old_pointer_space = heap->old_pointer_space();
CHECK_EQ(1, old_pointer_space->CountTotalPages());
for (int i = 0; i < number_of_test_pages; i++) {
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
SimulateFullSpace(old_pointer_space);
factory->NewFixedArray(1, TENURED);
}
@@ -2760,7 +2815,7 @@ TEST(Regress2237) {
// Generate a sliced string that is based on the above parent and
// lives in old-space.
SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
Handle<String> t = factory->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
CHECK(!CcTest::heap()->InNewSpace(*t));
@@ -2826,7 +2881,7 @@ TEST(Regress2211) {
}
-TEST(IncrementalMarkingClearsTypeFeedbackCells) {
+TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2849,23 +2904,27 @@ TEST(IncrementalMarkingClearsTypeFeedbackCells) {
CcTest::global()->Set(v8_str("fun1"), fun1);
CcTest::global()->Set(v8_str("fun2"), fun2);
CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
+
Handle<JSFunction> f =
v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(
CcTest::global()->Get(v8_str("f"))));
- Handle<TypeFeedbackCells> cells(TypeFeedbackInfo::cast(
- f->shared()->code()->type_feedback_info())->type_feedback_cells());
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsJSFunction());
- CHECK(cells->GetCell(1)->value()->IsJSFunction());
+ Handle<FixedArray> feedback_vector(TypeFeedbackInfo::cast(
+ f->shared()->code()->type_feedback_info())->feedback_vector());
+
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK(feedback_vector->get(0)->IsJSFunction());
+ CHECK(feedback_vector->get(1)->IsJSFunction());
SimulateIncrementalMarking();
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(2, cells->CellCount());
- CHECK(cells->GetCell(0)->value()->IsTheHole());
- CHECK(cells->GetCell(1)->value()->IsTheHole());
+ CHECK_EQ(2, feedback_vector->length());
+ CHECK_EQ(feedback_vector->get(0),
+ *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
+ CHECK_EQ(feedback_vector->get(1),
+ *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
}
@@ -3034,6 +3093,11 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
TEST(ReleaseStackTraceData) {
+ if (i::FLAG_always_opt) {
+ // TODO(ulan): Remove this once the memory leak via code_next_link is fixed.
+ // See: https://codereview.chromium.org/181833004/
+ return;
+ }
FLAG_use_ic = false; // ICs retain objects.
FLAG_concurrent_recompilation = false;
CcTest::InitializeVM();
@@ -3344,7 +3408,7 @@ TEST(Regress169928) {
// This should crash with a protection violation if we are running a build
// with the bug.
- AlwaysAllocateScope aa_scope;
+ AlwaysAllocateScope aa_scope(isolate);
v8::Script::Compile(mote_code_string)->Run();
}
@@ -3686,3 +3750,166 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
ASSERT(code->marked_for_deoptimization());
}
+
+
+
+static Handle<JSFunction> OptimizeDummyFunction(const char* name) {
+ EmbeddedVector<char, 256> source;
+ OS::SNPrintF(source,
+ "function %s() { return 0; }"
+ "%s(); %s();"
+ "%%OptimizeFunctionOnNextCall(%s);"
+ "%s();", name, name, name, name, name);
+ CompileRun(source.start());
+ Handle<JSFunction> fun =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str(name))));
+ return fun;
+}
+
+
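+// Counts the Code objects reachable from |code| via next_code_link.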
+static int GetCodeChainLength(Code* code) {
+ int result = 0;
+ while (code->next_code_link()->IsCode()) {
+ result++;
+ code = Code::cast(code->next_code_link());
+ }
+ return result;
+}
+
+
+TEST(NextCodeLinkIsWeak) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ heap->CollectAllAvailableGarbage();
+ int code_chain_length_before, code_chain_length_after;
+ {
+ HandleScope scope(heap->isolate());
+ Handle<JSFunction> mortal = OptimizeDummyFunction("mortal");
+ Handle<JSFunction> immortal = OptimizeDummyFunction("immortal");
+ CHECK_EQ(immortal->code()->next_code_link(), mortal->code());
+ code_chain_length_before = GetCodeChainLength(immortal->code());
+ // Keep the immortal code and let the mortal code die.
+ code = scope.CloseAndEscape(Handle<Code>(immortal->code()));
+ CompileRun("mortal = null; immortal = null;");
+ }
+ heap->CollectAllAvailableGarbage();
+ // Now mortal code should be dead.
+ code_chain_length_after = GetCodeChainLength(*code);
+ CHECK_EQ(code_chain_length_before - 1, code_chain_length_after);
+}
+
+
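+// Assembles a trivial code object flagged as OPTIMIZED_FUNCTION so it can be
+// linked into a context's optimized code list.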
+static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
+ i::byte buffer[i::Assembler::kMinimalBufferSize];
+ MacroAssembler masm(isolate, buffer, sizeof(buffer));
+ CodeDesc desc;
+ masm.Prologue(BUILD_FUNCTION_FRAME);
+ masm.GetCode(&desc);
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::OPTIMIZED_FUNCTION), undefined);
+ CHECK(code->IsCode());
+ return code;
+}
+
+
+TEST(NextCodeLinkIsWeak2) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ heap->CollectAllAvailableGarbage();
+ Handle<Context> context(Context::cast(heap->native_contexts_list()), isolate);
+ Handle<Code> new_head;
+ Handle<Object> old_head(context->get(Context::OPTIMIZED_CODE_LIST), isolate);
+ {
+ HandleScope scope(heap->isolate());
+ Handle<Code> immortal = DummyOptimizedCode(isolate);
+ Handle<Code> mortal = DummyOptimizedCode(isolate);
+ mortal->set_next_code_link(*old_head);
+ immortal->set_next_code_link(*mortal);
+ context->set(Context::OPTIMIZED_CODE_LIST, *immortal);
+ new_head = scope.CloseAndEscape(immortal);
+ }
+ heap->CollectAllAvailableGarbage();
+ // Now mortal code should be dead.
+ CHECK_EQ(*old_head, new_head->next_code_link());
+}
+
+
+#ifdef DEBUG
+TEST(AddInstructionChangesNewSpacePromotion) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_expose_gc = true;
+ i::FLAG_stress_compaction = true;
+ i::FLAG_gc_interval = 1000;
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ CompileRun(
+ "function add(a, b) {"
+ " return a + b;"
+ "}"
+ "add(1, 2);"
+ "add(\"a\", \"b\");"
+ "var oldSpaceObject;"
+ "gc();"
+ "function crash(x) {"
+ " var object = {a: null, b: null};"
+ " var result = add(1.5, x | 0);"
+ " object.a = result;"
+ " oldSpaceObject = object;"
+ " return object;"
+ "}"
+ "crash(1);"
+ "crash(1);"
+ "%OptimizeFunctionOnNextCall(crash);"
+ "crash(1);");
+
+ v8::Handle<v8::Object> global = CcTest::global();
+ v8::Handle<v8::Function> g =
+ v8::Handle<v8::Function>::Cast(global->Get(v8_str("crash")));
+ v8::Handle<v8::Value> args1[] = { v8_num(1) };
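+  // Disable inline allocation and let the next allocation request trigger a
+  // GC (debug-only allocation timeout) while the optimized crash() runs.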
+ heap->DisableInlineAllocation();
+ heap->set_allocation_timeout(1);
+ g->Call(global, 1, args1);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+}
+
+
+void OnFatalErrorExpectOOM(const char* location, const char* message) {
+ // Exit with 0 if the location matches our expectation.
+ exit(strcmp(location, "CALL_AND_RETRY_LAST"));
+}
+
+
+TEST(CEntryStubOOM) {
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::V8::SetFatalErrorHandler(OnFatalErrorExpectOOM);
+
+ v8::Handle<v8::Value> result = CompileRun(
+ "%SetFlags('--gc-interval=1');"
+ "var a = [];"
+ "a.__proto__ = [];"
+ "a.unshift(1)");
+
+ CHECK(result->IsNumber());
+}
+
+#endif // DEBUG
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
new file mode 100644
index 000000000..bd7a2b285
--- /dev/null
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -0,0 +1,266 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectBoolean(bool expected, Local<Value> result) {
+ CHECK(result->IsBoolean());
+ CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+static void ExpectNumber(double expected, Local<Value> result) {
+ CHECK(result->IsNumber());
+ CHECK_EQ(expected, result->NumberValue());
+}
+
+
+static void ExpectUndefined(Local<Value> result) {
+ CHECK(result->IsUndefined());
+}
+
+
+// Tests are sorted by order of implementation.
+
+TEST(simple_value) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("0x271828;");
+ ExpectInt32(0x271828, result);
+}
+
+
+TEST(global_variable) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun("var my_global_var = 0x123; my_global_var;");
+ ExpectInt32(0x123, result);
+}
+
+
+TEST(simple_function_call) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() { return 0x314; }"
+ "foo();");
+ ExpectInt32(0x314, result);
+}
+
+
+TEST(binary_op) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+ "function foo() {"
+ " var a = 0x1200;"
+ " var b = 0x0035;"
+ " return 2 * (a + b - 1);"
+ "}"
+ "foo();");
+ ExpectInt32(0x2468, result);
+}
+
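+// Evaluates "lhs op rhs" directly inside an if condition (test context) and
+// checks which branch was taken.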
+static void if_comparison_testcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "if ( lhs %s rhs ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
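+// Same comparison, but the result is first stored in a variable so the if
+// condition only reads a precomputed value (effect context).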
+static void if_comparison_effectcontext_helper(
+ char const * op,
+ char const * lhs,
+ char const * rhs,
+ int expect) {
+ char buffer[256];
+ snprintf(buffer, sizeof(buffer),
+ "var lhs = %s;"
+ "var rhs = %s;"
+ "var test = lhs %s rhs;"
+ "if ( test ) { 1; }"
+ "else { 0; }",
+ lhs, rhs, op);
+ Local<Value> result = CompileRun(buffer);
+ ExpectInt32(expect, result);
+}
+
+static void if_comparison_helper(
+ char const * op,
+ int expect_when_lt,
+ int expect_when_eq,
+ int expect_when_gt) {
+ // TODO(all): Non-SMI tests.
+
+ if_comparison_testcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_testcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_testcontext_helper(op, "9", "7", expect_when_gt);
+
+ if_comparison_effectcontext_helper(op, "1", "3", expect_when_lt);
+ if_comparison_effectcontext_helper(op, "5", "5", expect_when_eq);
+ if_comparison_effectcontext_helper(op, "9", "7", expect_when_gt);
+}
+
+
+TEST(if_comparison) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ if_comparison_helper("<", 1, 0, 0);
+ if_comparison_helper("<=", 1, 1, 0);
+ if_comparison_helper("==", 0, 1, 0);
+ if_comparison_helper("===", 0, 1, 0);
+ if_comparison_helper(">=", 0, 1, 1);
+ if_comparison_helper(">", 0, 0, 1);
+ if_comparison_helper("!=", 1, 0, 1);
+ if_comparison_helper("!==", 1, 0, 1);
+}
+
+
+TEST(unary_plus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ // SMI
+ result = CompileRun("var a = 1234; +a");
+ ExpectInt32(1234, result);
+ // Number
+ result = CompileRun("var a = 1234.5; +a");
+ ExpectNumber(1234.5, result);
+ // String (SMI)
+ result = CompileRun("var a = '1234'; +a");
+ ExpectInt32(1234, result);
+ // String (Number)
+ result = CompileRun("var a = '1234.5'; +a");
+ ExpectNumber(1234.5, result);
+ // Check side effects.
+ result = CompileRun("var a = 1234; +(a = 4321); a");
+ ExpectInt32(4321, result);
+}
+
+
+TEST(unary_minus) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = 1234.5; -a");
+ ExpectNumber(-1234.5, result);
+ result = CompileRun("var a = 1234; -(a = 4321); a");
+ ExpectInt32(4321, result);
+ result = CompileRun("var a = '1234'; -a");
+ ExpectInt32(-1234, result);
+ result = CompileRun("var a = '1234.5'; -a");
+ ExpectNumber(-1234.5, result);
+}
+
+
+TEST(unary_void) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; void (a);");
+ ExpectUndefined(result);
+ result = CompileRun("var a = 0; void (a = 42); a");
+ ExpectInt32(42, result);
+ result = CompileRun("var a = 0; void (a = 42);");
+ ExpectUndefined(result);
+}
+
+
+TEST(unary_not) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result;
+ result = CompileRun("var a = 1234; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !(a = 1234); a");
+ ExpectInt32(1234, result);
+ result = CompileRun("var a = '1234'; !a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = ''; !a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 1234; !!a");
+ ExpectBoolean(true, result);
+ result = CompileRun("var a = 0; !!a");
+ ExpectBoolean(false, result);
+ result = CompileRun("var a = 0; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(1, result);
+ result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
+ ExpectInt32(0, result);
+}
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
new file mode 100644
index 000000000..df3f4a829
--- /dev/null
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -0,0 +1,143 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Adapted from test/mjsunit/compiler/variables.js
+
+#include <limits.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "isolate.h"
+#include "compilation-cache.h"
+#include "execution.h"
+#include "snapshot.h"
+#include "platform.h"
+#include "utils.h"
+#include "cctest.h"
+#include "parser.h"
+#include "unicode-inl.h"
+
+using ::v8::Context;
+using ::v8::Extension;
+using ::v8::Function;
+using ::v8::FunctionTemplate;
+using ::v8::Handle;
+using ::v8::HandleScope;
+using ::v8::Local;
+using ::v8::Message;
+using ::v8::MessageCallback;
+using ::v8::Object;
+using ::v8::ObjectTemplate;
+using ::v8::Persistent;
+using ::v8::Script;
+using ::v8::StackTrace;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Undefined;
+using ::v8::V8;
+using ::v8::Value;
+
+static void ExpectInt32(int32_t expected, Local<Value> result) {
+ CHECK(result->IsInt32());
+ CHECK_EQ(expected, result->Int32Value());
+}
+
+
+// Global variables.
+TEST(global_variables) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"var x = 0;"
+"function f0() { return x; }"
+"f0();");
+ ExpectInt32(0, result);
+}
+
+
+// Parameters.
+TEST(parameters) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f1(x) { return x; }"
+"f1(1);");
+ ExpectInt32(1, result);
+}
+
+
+// Stack-allocated locals.
+TEST(stack_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f2() { var x = 2; return x; }"
+"f2();");
+ ExpectInt32(2, result);
+}
+
+
+// Context-allocated locals. Local function forces x into f3's context.
+TEST(context_allocated_locals) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f3(x) {"
+" function g() { return x; }"
+" return x;"
+"}"
+"f3(3);");
+ ExpectInt32(3, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(read_from_outer_context) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f4(x) {"
+" function g() { return x; }"
+" return g();"
+"}"
+"f4(4);");
+ ExpectInt32(4, result);
+}
+
+
+// Local function reads x from an outer context.
+TEST(lookup_slots) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ Local<Value> result = CompileRun(
+"function f5(x) {"
+" with ({}) return x;"
+"}"
+"f5(5);");
+ ExpectInt32(5, result);
+}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 65310369c..42af0a555 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -310,7 +310,7 @@ TEST(Issue23768) {
// Script needs to have a name in order to trigger InitLineEnds execution.
v8::Handle<v8::String> origin =
v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test");
- v8::Handle<v8::Script> evil_script = v8::Script::Compile(source, origin);
+ v8::Handle<v8::Script> evil_script = CompileWithOrigin(source, origin);
CHECK(!evil_script.IsEmpty());
CHECK(!evil_script->Run().IsEmpty());
i::Handle<i::ExternalTwoByteString> i_source(
@@ -468,7 +468,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
CcTest::isolate(), reinterpret_cast<const char*>(source.start()),
v8::String::kNormalString, source.length());
v8::TryCatch try_catch;
- v8::Handle<v8::Script> script = v8::Script::Compile(source_str, v8_str(""));
+ v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
if (script.IsEmpty()) {
v8::String::Utf8Value exception(try_catch.Exception());
printf("compile: %s\n", *exception);
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
index 38c738f1d..3ad52712c 100644
--- a/deps/v8/test/cctest/test-macro-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -122,6 +122,34 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmp(ebx, edx);
__ j(not_equal, &exit);
+ // Test 5.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm, SSE2);
+ __ mov(eax, Immediate(5)); // Test XMM move immediate.
+ __ Move(xmm0, 0.0);
+ __ Move(xmm1, 0.0);
+ __ ucomisd(xmm0, xmm1);
+ __ j(not_equal, &exit);
+ __ Move(xmm2, 991.01);
+ __ ucomisd(xmm0, xmm2);
+ __ j(equal, &exit);
+ __ Move(xmm0, 991.01);
+ __ ucomisd(xmm0, xmm2);
+ __ j(not_equal, &exit);
+ }
+
+ // Test 6.
+ __ mov(eax, Immediate(6));
+ __ Move(edx, Immediate(0)); // Test Move()
+ __ cmp(edx, Immediate(0));
+ __ j(not_equal, &exit);
+ __ Move(ecx, Immediate(-1));
+ __ cmp(ecx, Immediate(-1));
+ __ j(not_equal, &exit);
+ __ Move(ebx, Immediate(0x77));
+ __ cmp(ebx, Immediate(0x77));
+ __ j(not_equal, &exit);
+
__ xor_(eax, eax); // Success.
__ bind(&exit);
__ add(esp, Immediate(1 * kPointerSize));
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index b20094967..3154aac59 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -132,5 +132,47 @@ TEST(CopyBytes) {
}
+static void TestNaN(const char *code) {
+  // The NaN value differs between the MIPS and x86 architectures, and the
+  // TEST(NaNx) tests check the case where an x86 NaN value is serialized into
+  // the snapshot on the simulator during cross compilation.
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Script> script = v8::Script::Compile(v8_str(code));
+ v8::Local<v8::Object> result = v8::Local<v8::Object>::Cast(script->Run());
+ // Have to populate the handle manually, as it's not Cast-able.
+ i::Handle<i::JSObject> o =
+ v8::Utils::OpenHandle<v8::Object, i::JSObject>(result);
+ i::Handle<i::JSArray> array1(reinterpret_cast<i::JSArray*>(*o));
+ i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
+ double value = a->get_scalar(0);
+ CHECK(std::isnan(value) &&
+ i::BitCast<uint64_t>(value) ==
+ i::BitCast<uint64_t>(
+ i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+}
+
+
+TEST(NaN0) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = new Array(Number.NaN, Number.POSITIVE_INFINITY);"
+ "}"
+ "result;");
+}
+
+
+TEST(NaN1) {
+ TestNaN(
+ "var result;"
+ "for (var i = 0; i < 2; i++) {"
+ " result = [NaN];"
+ "}"
+ "result;");
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 3daed5b45..f29daccea 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -99,8 +99,8 @@ typedef int (*F0)();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
- __ push(i::kSmiConstantRegister);
- __ push(i::kRootRegister);
+ __ pushq(i::kSmiConstantRegister);
+ __ pushq(i::kRootRegister);
__ InitializeSmiConstantRegister();
__ InitializeRootRegister();
}
@@ -112,8 +112,8 @@ static void ExitCode(MacroAssembler* masm) {
__ cmpq(rdx, i::kSmiConstantRegister);
__ movq(rdx, Immediate(-1));
__ cmovq(not_equal, rax, rdx);
- __ pop(i::kRootRegister);
- __ pop(i::kSmiConstantRegister);
+ __ popq(i::kRootRegister);
+ __ popq(i::kSmiConstantRegister);
}
@@ -181,7 +181,7 @@ TEST(SmiMove) {
TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -277,7 +277,7 @@ TEST(SmiCompare) {
TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -380,7 +380,7 @@ TEST(Integer32ToSmi) {
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -450,7 +450,7 @@ TEST(Integer64PlusConstantToSmi) {
TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -490,7 +490,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -501,7 +501,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -512,7 +512,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -523,7 +523,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
@@ -536,7 +536,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "zero" non-smi.
__ j(cond, &exit);
@@ -553,7 +553,7 @@ TEST(SmiCheck) {
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Negative" non-smi.
__ j(cond, &exit);
@@ -564,7 +564,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi.
__ j(cond, &exit);
@@ -605,17 +605,17 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
@@ -649,7 +649,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
// Success
- __ xor_(rax, rax);
+ __ xorq(rax, rax);
__ bind(&exit);
ExitCode(masm);
@@ -736,7 +736,7 @@ TEST(SmiNeg) {
TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -961,7 +961,7 @@ TEST(SmiAdd) {
SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1182,7 +1182,7 @@ TEST(SmiSub) {
SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
SmiSubOverflowTest(masm, &exit, 0x100, 0);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1269,7 +1269,7 @@ TEST(SmiMul) {
TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1360,8 +1360,8 @@ TEST(SmiDiv) {
EntryCode(masm);
Label exit;
- __ push(r14);
- __ push(r15);
+ __ pushq(r14);
+ __ pushq(r15);
TestSmiDiv(masm, &exit, 0x10, 1, 1);
TestSmiDiv(masm, &exit, 0x20, 1, 0);
TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1383,11 +1383,11 @@ TEST(SmiDiv) {
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
- __ pop(r15);
- __ pop(r14);
+ __ popq(r15);
+ __ popq(r14);
ExitCode(masm);
__ ret(0);
@@ -1470,8 +1470,8 @@ TEST(SmiMod) {
EntryCode(masm);
Label exit;
- __ push(r14);
- __ push(r15);
+ __ pushq(r14);
+ __ pushq(r15);
TestSmiMod(masm, &exit, 0x10, 1, 1);
TestSmiMod(masm, &exit, 0x20, 1, 0);
TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1493,11 +1493,11 @@ TEST(SmiMod) {
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
- __ pop(r15);
- __ pop(r14);
+ __ popq(r15);
+ __ popq(r14);
ExitCode(masm);
__ ret(0);
@@ -1573,7 +1573,7 @@ TEST(SmiIndex) {
TestSmiIndex(masm, &exit, 0x40, 1000);
TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1590,7 +1590,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ movl(rax, Immediate(id));
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1600,7 +1600,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
@@ -1611,8 +1611,8 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
Label fail_ok;
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
- __ xor_(rcx, Immediate(kSmiTagMask));
- __ xor_(rdx, Immediate(kSmiTagMask));
+ __ xorq(rcx, Immediate(kSmiTagMask));
+ __ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
@@ -1646,7 +1646,7 @@ TEST(SmiSelectNonSmi) {
TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1727,7 +1727,7 @@ TEST(SmiAnd) {
TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1810,7 +1810,7 @@ TEST(SmiOr) {
TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1893,7 +1893,7 @@ TEST(SmiXor) {
TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -1955,7 +1955,7 @@ TEST(SmiNot) {
TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNot(masm, &exit, 0x80, 0x05555555);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2050,7 +2050,7 @@ TEST(SmiShiftLeft) {
TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
TestSmiShiftLeft(masm, &exit, 0x190, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2156,7 +2156,7 @@ TEST(SmiShiftLogicalRight) {
TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2225,7 +2225,7 @@ TEST(SmiShiftArithmeticRight) {
TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2291,7 +2291,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
@@ -2324,28 +2324,28 @@ TEST(OperandOffset) {
Label exit;
EntryCode(masm);
- __ push(r13);
- __ push(r14);
- __ push(rbx);
- __ push(rbp);
- __ push(Immediate(0x100)); // <-- rbp
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(rbx);
+ __ pushq(rbp);
+ __ pushq(Immediate(0x100)); // <-- rbp
__ movq(rbp, rsp);
- __ push(Immediate(0x101));
- __ push(Immediate(0x102));
- __ push(Immediate(0x103));
- __ push(Immediate(0x104));
- __ push(Immediate(0x105)); // <-- rbx
- __ push(Immediate(0x106));
- __ push(Immediate(0x107));
- __ push(Immediate(0x108));
- __ push(Immediate(0x109)); // <-- rsp
+ __ pushq(Immediate(0x101));
+ __ pushq(Immediate(0x102));
+ __ pushq(Immediate(0x103));
+ __ pushq(Immediate(0x104));
+ __ pushq(Immediate(0x105)); // <-- rbx
+ __ pushq(Immediate(0x106));
+ __ pushq(Immediate(0x107));
+ __ pushq(Immediate(0x108));
+ __ pushq(Immediate(0x109)); // <-- rsp
// rbp = rsp[9]
// r15 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
- __ lea(r14, Operand(rsp, 3 * kPointerSize));
- __ lea(r13, Operand(rbp, -3 * kPointerSize));
- __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+ __ leaq(r14, Operand(rsp, 3 * kPointerSize));
+ __ leaq(r13, Operand(rbp, -3 * kPointerSize));
+ __ leaq(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
__ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
@@ -2643,11 +2643,11 @@ TEST(OperandOffset) {
__ movl(rax, Immediate(0));
__ bind(&exit);
- __ lea(rsp, Operand(rbp, kPointerSize));
- __ pop(rbp);
- __ pop(rbx);
- __ pop(r14);
- __ pop(r13);
+ __ leaq(rsp, Operand(rbp, kPointerSize));
+ __ popq(rbp);
+ __ popq(rbx);
+ __ popq(r14);
+ __ popq(r13);
ExitCode(masm);
__ ret(0);
@@ -2796,7 +2796,7 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
- __ xor_(rax, rax); // Success.
+ __ xorq(rax, rax); // Success.
__ bind(&exit);
__ addq(rsp, Immediate(1 * kPointerSize));
ExitCode(masm);
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 0c95d94f4..0200129b1 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -162,7 +162,7 @@ TEST(MarkCompactCollector) {
SharedFunctionInfo* function_share = SharedFunctionInfo::cast(
heap->AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
JSFunction* function = JSFunction::cast(
- heap->AllocateFunction(*isolate->function_map(),
+ heap->AllocateFunction(*isolate->sloppy_function_map(),
function_share,
heap->undefined_value())->ToObjectChecked());
Map* initial_map =
@@ -170,7 +170,7 @@ TEST(MarkCompactCollector) {
JSObject::kHeaderSize)->ToObjectChecked());
function->set_initial_map(initial_map);
JSReceiver::SetProperty(
- global, handle(func_name), handle(function), NONE, kNonStrictMode);
+ global, handle(func_name), handle(function), NONE, SLOPPY);
JSObject* obj = JSObject::cast(
heap->AllocateJSObject(function)->ToObjectChecked());
@@ -187,13 +187,12 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(heap->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
String::cast(heap->InternalizeUtf8String("theObject")->ToObjectChecked());
- JSReceiver::SetProperty(
- global, handle(obj_name), handle(obj), NONE, kNonStrictMode);
+ JSReceiver::SetProperty(global, handle(obj_name), handle(obj), NONE, SLOPPY);
String* prop_name =
String::cast(heap->InternalizeUtf8String("theSlot")->ToObjectChecked());
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
JSReceiver::SetProperty(
- handle(obj), handle(prop_name), twenty_three, NONE, kNonStrictMode);
+ handle(obj), handle(prop_name), twenty_three, NONE, SLOPPY);
heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 5");
@@ -496,6 +495,7 @@ TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
// Avoid flakiness.
FLAG_crankshaft = false;
+ FLAG_concurrent_osr = false;
FLAG_concurrent_recompilation = false;
// Only Linux has the proc filesystem and only if it is mapped. If it's not
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index f59eef948..1dc38f9af 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -29,11 +29,8 @@
using namespace v8::internal;
-TEST(Regress340063) {
- CcTest::InitializeVM();
- if (!i::FLAG_allocation_site_pretenuring) return;
- v8::HandleScope scope(CcTest::isolate());
+static void SetUpNewSpaceWithPoisonedMementoAtTop() {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
NewSpace* new_space = heap->new_space();
@@ -52,8 +49,75 @@ TEST(Regress340063) {
memento->set_map_no_write_barrier(heap->allocation_memento_map());
memento->set_allocation_site(
reinterpret_cast<AllocationSite*>(kHeapObjectTag), SKIP_WRITE_BARRIER);
+}
+
+
+TEST(Regress340063) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
- heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CcTest::i_isolate()->heap()->CollectAllGarbage(
+ Heap::kAbortIncrementalMarkingMask);
+}
+
+
+TEST(BadMementoAfterTopForceScavenge) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ SetUpNewSpaceWithPoisonedMementoAtTop();
+
+ // Force GC to test the poisoned memento handling
+ CcTest::i_isolate()->heap()->CollectGarbage(i::NEW_SPACE);
+}
+
+
+TEST(PretenuringCallNew) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ if (!i::FLAG_pretenuring_call_new) return;
+
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ // We need to create several instances to get past the slack-tracking
+ // phase, where mementos aren't emitted.
+ int call_count = 10;
+ CHECK_GE(call_count, SharedFunctionInfo::kGenerousAllocationCount);
+ i::ScopedVector<char> test_buf(1024);
+ const char* program =
+ "function f() {"
+ " this.a = 3;"
+ " this.b = {};"
+ " return this;"
+ "};"
+ "var a;"
+ "for(var i = 0; i < %d; i++) {"
+ " a = new f();"
+ "}"
+ "a;";
+ i::OS::SNPrintF(test_buf, program, call_count);
+ v8::Local<v8::Value> res = CompileRun(test_buf.start());
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ // The object of class f should have a memento secreted behind it.
+ Address memento_address = o->address() + o->map()->instance_size();
+ AllocationMemento* memento =
+ reinterpret_cast<AllocationMemento*>(memento_address + kHeapObjectTag);
+ CHECK_EQ(memento->map(), heap->allocation_memento_map());
+
+ // Furthermore, how many mementos did we create? The count should match
+ // call_count - SharedFunctionInfo::kGenerousAllocationCount.
+ AllocationSite* site = memento->GetAllocationSite();
+ CHECK_EQ(call_count - SharedFunctionInfo::kGenerousAllocationCount,
+ site->pretenure_create_count()->value());
}
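
The memento lookup in PretenuringCallNew above relies on a simple layout invariant: the AllocationMemento, if present, sits directly behind the object, so its untagged address is the object's start address plus the map's instance size. A rough sketch of that address arithmetic with hypothetical stand-in types (not the V8 API):

#include <cstddef>

// Stand-ins only; the real JSObject/AllocationMemento layouts live in V8's heap.
struct FakeObject  { char fields[32]; };
struct FakeMemento { void* map; void* allocation_site; };

// Same arithmetic as the test: memento_address = object_address + instance_size.
FakeMemento* MementoBehind(char* object_address, size_t instance_size) {
  return reinterpret_cast<FakeMemento*>(object_address + instance_size);
}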
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
new file mode 100644
index 000000000..0172726af
--- /dev/null
+++ b/deps/v8/test/cctest/test-microtask-delivery.cc
@@ -0,0 +1,135 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+
+using namespace v8;
+namespace i = v8::internal;
+
+namespace {
+class HarmonyIsolate {
+ public:
+ HarmonyIsolate() {
+ isolate_ = Isolate::New();
+ isolate_->Enter();
+ }
+
+ ~HarmonyIsolate() {
+ isolate_->Exit();
+ isolate_->Dispose();
+ }
+
+ Isolate* GetIsolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+};
+}
+
+
+TEST(MicrotaskDeliverySimple) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context(isolate.GetIsolate());
+ CompileRun(
+ "var ordering = [];"
+ "var resolver = {};"
+ "function handler(resolve) { resolver.resolve = resolve; }"
+ "var obj = {};"
+ "var observeOrders = [1, 4];"
+ "function observer() {"
+ "ordering.push(observeOrders.shift());"
+ "resolver.resolve();"
+ "}"
+ "var p = new Promise(handler);"
+ "p.then(function() {"
+ "ordering.push(2);"
+ "}).then(function() {"
+ "ordering.push(3);"
+ "obj.id++;"
+ "return new Promise(handler);"
+ "}).then(function() {"
+ "ordering.push(5);"
+ "}).then(function() {"
+ "ordering.push(6);"
+ "});"
+ "Object.observe(obj, observer);"
+ "obj.id = 1;");
+ CHECK_EQ(6, CompileRun("ordering.length")->Int32Value());
+ CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
+ CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
+ CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
+ CHECK_EQ(4, CompileRun("ordering[3]")->Int32Value());
+ CHECK_EQ(5, CompileRun("ordering[4]")->Int32Value());
+ CHECK_EQ(6, CompileRun("ordering[5]")->Int32Value());
+}
+
+
+TEST(MicrotaskPerIsolateState) {
+ HarmonyIsolate isolate;
+ HandleScope scope(isolate.GetIsolate());
+ LocalContext context1(isolate.GetIsolate());
+ V8::SetAutorunMicrotasks(isolate.GetIsolate(), false);
+ CompileRun(
+ "var obj = { calls: 0 };");
+ Handle<Value> obj = CompileRun("obj");
+ {
+ LocalContext context2(isolate.GetIsolate());
+ context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var resolver = {};"
+ "new Promise(function(resolve) {"
+ "resolver.resolve = resolve;"
+ "}).then(function() {"
+ "obj.calls++;"
+ "});"
+ "(function() {"
+ "resolver.resolve();"
+ "})();");
+ }
+ {
+ LocalContext context3(isolate.GetIsolate());
+ context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ CompileRun(
+ "var foo = { id: 1 };"
+ "Object.observe(foo, function() {"
+ "obj.calls++;"
+ "});"
+ "foo.id++;");
+ }
+ {
+ LocalContext context4(isolate.GetIsolate());
+ context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ obj);
+ V8::RunMicrotasks(isolate.GetIsolate());
+ CHECK_EQ(2, CompileRun("obj.calls")->Int32Value());
+ }
+}
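
The per-isolate test above uses the embedder-controlled microtask mode: with autorun disabled, promise reactions and Object.observe callbacks queued in any context only run once the embedder pumps the queue. A minimal usage sketch, assuming the same v8 3.25 API names the test itself calls (V8::SetAutorunMicrotasks / V8::RunMicrotasks):

#include "v8.h"

// Sketch only: queue microtasks while scripts run, then drain them explicitly.
void RunScriptsThenDrainMicrotasks(v8::Isolate* isolate) {
  v8::V8::SetAutorunMicrotasks(isolate, false);  // stop draining after each call
  // ... run scripts that resolve promises or mutate observed objects ...
  v8::V8::RunMicrotasks(isolate);                // deliver everything queued so far
}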
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 0a30d4e27..6bde5b37e 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -32,33 +32,10 @@
using namespace v8;
namespace i = v8::internal;
-namespace {
-// Need to create a new isolate when FLAG_harmony_observation is on.
-class HarmonyIsolate {
- public:
- HarmonyIsolate() {
- i::FLAG_harmony_observation = true;
- isolate_ = Isolate::New();
- isolate_->Enter();
- }
-
- ~HarmonyIsolate() {
- isolate_->Exit();
- isolate_->Dispose();
- }
-
- Isolate* GetIsolate() const { return isolate_; }
-
- private:
- Isolate* isolate_;
-};
-}
-
TEST(PerIsolateState) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context1(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context1(CcTest::isolate());
CompileRun(
"var count = 0;"
"var calls = 0;"
@@ -71,29 +48,29 @@ TEST(PerIsolateState) {
"(function() { obj.foo = 'bar'; })");
Handle<Value> notify_fun2;
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
notify_fun2 = CompileRun(
"(function() { obj.foo = 'baz'; })");
}
Handle<Value> notify_fun3;
{
- LocalContext context3(isolate.GetIsolate());
- context3->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context3(CcTest::isolate());
+ context3->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
notify_fun3 = CompileRun(
"(function() { obj.foo = 'bat'; })");
}
{
- LocalContext context4(isolate.GetIsolate());
+ LocalContext context4(CcTest::isolate());
context4->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun1"),
+ String::NewFromUtf8(CcTest::isolate(), "observer"), observer);
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun1"),
notify_fun1);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun2"),
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun2"),
notify_fun2);
- context4->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "fun3"),
+ context4->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "fun3"),
notify_fun3);
CompileRun("fun1(); fun2(); fun3(); Object.deliverChangeRecords(observer)");
}
@@ -103,9 +80,8 @@ TEST(PerIsolateState) {
TEST(EndOfMicrotaskDelivery) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var count = 0;"
@@ -117,9 +93,8 @@ TEST(EndOfMicrotaskDelivery) {
TEST(DeliveryOrdering) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj1 = {};"
"var obj2 = {};"
@@ -149,9 +124,8 @@ TEST(DeliveryOrdering) {
TEST(DeliveryOrderingReentrant) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var reentered = false;"
@@ -181,9 +155,8 @@ TEST(DeliveryOrderingReentrant) {
TEST(DeliveryOrderingDeliverChangeRecords) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"var ordering = [];"
@@ -206,21 +179,20 @@ TEST(DeliveryOrderingDeliverChangeRecords) {
TEST(ObjectHashTableGrowth) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
// Initializing this context sets up initial hash tables.
- LocalContext context(isolate.GetIsolate());
+ LocalContext context(CcTest::isolate());
Handle<Value> obj = CompileRun("obj = {};");
Handle<Value> observer = CompileRun(
"var ran = false;"
"(function() { ran = true })");
{
// As does initializing this context.
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
obj);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "observer"), observer);
+ String::NewFromUtf8(CcTest::isolate(), "observer"), observer);
CompileRun(
"var objArr = [];"
// 100 objects should be enough to make the hash table grow
@@ -238,9 +210,8 @@ TEST(ObjectHashTableGrowth) {
TEST(GlobalObjectObservation) {
- HarmonyIsolate isolate;
- LocalContext context(isolate.GetIsolate());
- HandleScope scope(isolate.GetIsolate());
+ LocalContext context(CcTest::isolate());
+ HandleScope scope(CcTest::isolate());
Handle<Object> global_proxy = context->Global();
CompileRun(
"var records = [];"
@@ -261,7 +232,7 @@ TEST(GlobalObjectObservation) {
// to the old context.
context->DetachGlobal();
{
- LocalContext context2(isolate.GetIsolate());
+ LocalContext context2(CcTest::isolate());
CompileRun(
"var records2 = [];"
"var global = this;"
@@ -279,7 +250,7 @@ TEST(GlobalObjectObservation) {
{
// Delegates to Context::New
LocalContext context3(
- isolate.GetIsolate(), NULL, Handle<ObjectTemplate>(), global_proxy);
+ CcTest::isolate(), NULL, Handle<ObjectTemplate>(), global_proxy);
CompileRun(
"var records3 = [];"
"Object.observe(this, function(r) { [].push.apply(records3, r) });"
@@ -327,12 +298,11 @@ static void ExpectRecords(v8::Isolate* isolate,
}
#define EXPECT_RECORDS(records, expectations) \
- ExpectRecords(isolate.GetIsolate(), records, expectations, \
+ ExpectRecords(CcTest::isolate(), records, expectations, \
ARRAY_SIZE(expectations))
TEST(APITestBasicMutation) {
- HarmonyIsolate isolate;
- v8::Isolate* v8_isolate = isolate.GetIsolate();
+ v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
Handle<Object> obj = Handle<Object>::Cast(CompileRun(
@@ -379,8 +349,7 @@ TEST(APITestBasicMutation) {
TEST(HiddenPrototypeObservation) {
- HarmonyIsolate isolate;
- v8::Isolate* v8_isolate = isolate.GetIsolate();
+ v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
Handle<FunctionTemplate> tmpl = FunctionTemplate::New(v8_isolate);
@@ -431,15 +400,14 @@ static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
TEST(ObservationWeakMap) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun(
"var obj = {};"
"Object.observe(obj, function(){});"
"Object.getNotifier(obj);"
"obj = null;");
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate.GetIsolate());
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(CcTest::isolate());
i::Handle<i::JSObject> observation_state =
i_isolate->factory()->observation_state();
i::Handle<i::JSWeakMap> callbackInfoMap =
@@ -528,17 +496,16 @@ static Handle<Object> CreateAccessCheckedObject(
TEST(NamedAccessCheck) {
- HarmonyIsolate isolate;
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(),
+ CcTest::isolate(),
NamedAccessAllowUnlessBlocked,
IndexedAccessAlwaysAllowed,
- String::NewFromUtf8(isolate.GetIsolate(), "foo"));
+ String::NewFromUtf8(CcTest::isolate(), "foo"));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -546,11 +513,11 @@ TEST(NamedAccessCheck) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"),
obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
@@ -564,9 +531,9 @@ TEST(NamedAccessCheck) {
const RecordExpectation expected_records2[] = {
{ instance, "add", "foo", Handle<Value>() },
{ instance, "update", "foo",
- String::NewFromUtf8(isolate.GetIsolate(), "bar") },
+ String::NewFromUtf8(CcTest::isolate(), "bar") },
{ instance, "reconfigure", "foo",
- Number::New(isolate.GetIsolate(), 5) },
+ Number::New(CcTest::isolate(), 5) },
{ instance, "add", "bar", Handle<Value>() },
{ obj_no_check, "add", "baz", Handle<Value>() },
};
@@ -582,15 +549,14 @@ TEST(NamedAccessCheck) {
TEST(IndexedAccessCheck) {
- HarmonyIsolate isolate;
const AccessType types[] = { ACCESS_GET, ACCESS_HAS };
for (size_t i = 0; i < ARRAY_SIZE(types); ++i) {
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = types[i];
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), NamedAccessAlwaysAllowed,
- IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 7));
+ CcTest::isolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(CcTest::isolate(), 7));
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -598,11 +564,11 @@ TEST(IndexedAccessCheck) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"),
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"),
obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
@@ -616,8 +582,8 @@ TEST(IndexedAccessCheck) {
const RecordExpectation expected_records2[] = {
{ instance, "add", "7", Handle<Value>() },
{ instance, "update", "7",
- String::NewFromUtf8(isolate.GetIsolate(), "foo") },
- { instance, "reconfigure", "7", Number::New(isolate.GetIsolate(), 5) },
+ String::NewFromUtf8(CcTest::isolate(), "foo") },
+ { instance, "reconfigure", "7", Number::New(CcTest::isolate(), 5) },
{ instance, "add", "8", Handle<Value>() },
{ obj_no_check, "add", "42", Handle<Value>() }
};
@@ -633,13 +599,12 @@ TEST(IndexedAccessCheck) {
TEST(SpliceAccessCheck) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
g_access_block_type = ACCESS_GET;
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), NamedAccessAlwaysAllowed,
- IndexedAccessAllowUnlessBlocked, Number::New(isolate.GetIsolate(), 1));
+ CcTest::isolate(), NamedAccessAlwaysAllowed,
+ IndexedAccessAllowUnlessBlocked, Number::New(CcTest::isolate(), 1));
CompileRun("var records = null;"
"obj[1] = 'foo';"
"obj.length = 2;"
@@ -649,11 +614,11 @@ TEST(SpliceAccessCheck) {
"Array.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Array.observe(obj, observer2);"
@@ -680,11 +645,10 @@ TEST(SpliceAccessCheck) {
TEST(DisallowAllForAccessKeys) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ CcTest::isolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var objNoCheck = {};"
"var observer = function(r) { records = r };"
@@ -692,11 +656,11 @@ TEST(DisallowAllForAccessKeys) {
"Object.observe(objNoCheck, observer);");
Handle<Value> obj_no_check = CompileRun("objNoCheck");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
context2->Global()->Set(
- String::NewFromUtf8(isolate.GetIsolate(), "objNoCheck"), obj_no_check);
+ String::NewFromUtf8(CcTest::isolate(), "objNoCheck"), obj_no_check);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);"
@@ -719,24 +683,23 @@ TEST(DisallowAllForAccessKeys) {
TEST(AccessCheckDisallowApiModifications) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
Handle<Object> instance = CreateAccessCheckedObject(
- isolate.GetIsolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
+ CcTest::isolate(), BlockAccessKeys, IndexedAccessAlwaysAllowed);
CompileRun("var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
{
- LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(String::NewFromUtf8(isolate.GetIsolate(), "obj"),
+ LocalContext context2(CcTest::isolate());
+ context2->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "obj"),
instance);
CompileRun("var records2 = null;"
"var observer2 = function(r) { records2 = r };"
"Object.observe(obj, observer2);");
- instance->Set(5, String::NewFromUtf8(isolate.GetIsolate(), "bar"));
- instance->Set(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
- String::NewFromUtf8(isolate.GetIsolate(), "bar"));
+ instance->Set(5, String::NewFromUtf8(CcTest::isolate(), "bar"));
+ instance->Set(String::NewFromUtf8(CcTest::isolate(), "foo"),
+ String::NewFromUtf8(CcTest::isolate(), "bar"));
CompileRun(""); // trigger delivery
const RecordExpectation expected_records2[] = {
{ instance, "add", "5", Handle<Value>() },
@@ -749,18 +712,17 @@ TEST(AccessCheckDisallowApiModifications) {
TEST(HiddenPropertiesLeakage) {
- HarmonyIsolate isolate;
- HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
+ HandleScope scope(CcTest::isolate());
+ LocalContext context(CcTest::isolate());
CompileRun("var obj = {};"
"var records = null;"
"var observer = function(r) { records = r };"
"Object.observe(obj, observer);");
Handle<Value> obj =
- context->Global()->Get(String::NewFromUtf8(isolate.GetIsolate(), "obj"));
+ context->Global()->Get(String::NewFromUtf8(CcTest::isolate(), "obj"));
Handle<Object>::Cast(obj)
- ->SetHiddenValue(String::NewFromUtf8(isolate.GetIsolate(), "foo"),
- Null(isolate.GetIsolate()));
+ ->SetHiddenValue(String::NewFromUtf8(CcTest::isolate(), "foo"),
+ Null(CcTest::isolate()));
CompileRun(""); // trigger delivery
CHECK(CompileRun("records")->IsNull());
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 22d5056f8..2746388bb 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -35,6 +35,7 @@
#include "compiler.h"
#include "execution.h"
#include "isolate.h"
+#include "objects.h"
#include "parser.h"
#include "preparser.h"
#include "scanner-character-streams.h"
@@ -212,18 +213,25 @@ TEST(Preparsing) {
{
i::FLAG_lazy = true;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source =
- v8::String::NewExternal(isolate, resource);
- v8::Script::Compile(script_source, NULL, preparse);
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewExternal(isolate, resource),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::ScriptCompiler::Compile(isolate,
+ &script_source);
}
{
i::FLAG_lazy = false;
ScriptResource* resource = new ScriptResource(source, source_length);
- v8::Local<v8::String> script_source =
- v8::String::NewExternal(isolate, resource);
- v8::Script::New(script_source, NULL, preparse, v8::Local<v8::String>());
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewExternal(isolate, resource),
+ new v8::ScriptCompiler::CachedData(
+ reinterpret_cast<const uint8_t*>(preparse->Data()),
+ preparse->Length()));
+ v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
}
delete preparse;
i::FLAG_lazy = lazy_flag;
@@ -252,6 +260,99 @@ TEST(Preparsing) {
}
+TEST(PreparseFunctionDataIsUsed) {
+ // This tests that we actually do use the function data generated by the
+ // preparser.
+
+ // Make preparsing work for short scripts.
+ i::FLAG_min_preparse_length = 0;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ const char* good_code =
+ "function this_is_lazy() { var a; } function foo() { return 25; } foo();";
+
+ // Insert a syntax error inside the lazy function.
+ const char* bad_code =
+ "function this_is_lazy() { if ( } function foo() { return 25; } foo();";
+
+ v8::ScriptCompiler::Source good_source(v8_str(good_code));
+ v8::ScriptCompiler::Compile(isolate, &good_source,
+ v8::ScriptCompiler::kProduceDataToCache);
+
+ const v8::ScriptCompiler::CachedData* cached_data =
+ good_source.GetCachedData();
+ CHECK(cached_data->data != NULL);
+ CHECK_GT(cached_data->length, 0);
+
+ // Now compile the erroneous code with the good preparse data. If the preparse
+ // data is used, the lazy function is skipped and it should compile fine.
+ v8::ScriptCompiler::Source bad_source(
+ v8_str(bad_code), new v8::ScriptCompiler::CachedData(
+ cached_data->data, cached_data->length));
+ v8::Local<v8::Value> result =
+ v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
+ CHECK(result->IsInt32());
+ CHECK_EQ(25, result->Int32Value());
+}
+
+
+TEST(PreparseSymbolDataIsUsed) {
+ // This tests that we actually do use the symbol data generated by the
+ // preparser.
+
+ // Only do one compilation pass in this test (otherwise we will parse the
+ // source code again without preparse data and it will fail).
+ i::FLAG_crankshaft = false;
+
+ // Make preparsing work for short scripts.
+ i::FLAG_min_preparse_length = 0;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ // Note that the ( before function makes the function not lazily compiled.
+ const char* good_code =
+ "(function weird() { var foo = 26; return foo; })()";
+
+ // Insert an undefined identifier. If the preparser data is used, the symbol
+ // stream is used instead, and this identifier resolves to "foo".
+ const char* bad_code =
+ "(function weird() { var foo = 26; return wut; })()";
+
+ v8::ScriptCompiler::Source good_source(v8_str(good_code));
+ v8::ScriptCompiler::Compile(isolate, &good_source,
+ v8::ScriptCompiler::kProduceDataToCache);
+
+ const v8::ScriptCompiler::CachedData* cached_data =
+ good_source.GetCachedData();
+ CHECK(cached_data->data != NULL);
+ CHECK_GT(cached_data->length, 0);
+
+ // Now compile the erroneous code with the good preparse data. If the preparse
+ // data is used, we will see a second occurrence of "foo" instead of the
+ // unknown "wut".
+ v8::ScriptCompiler::Source bad_source(
+ v8_str(bad_code), new v8::ScriptCompiler::CachedData(
+ cached_data->data, cached_data->length));
+ v8::Local<v8::Value> result =
+ v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
+ CHECK(result->IsInt32());
+ CHECK_EQ(26, result->Int32Value());
+}
+
+
TEST(StandAlonePreParser) {
v8::V8::Initialize();
@@ -324,6 +425,99 @@ TEST(StandAlonePreParserNoNatives) {
}
+TEST(PreparsingObjectLiterals) {
+ // Regression test for a bug where the symbol stream produced by PreParser
+ // didn't match what Parser wanted to consume.
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ int marker;
+ CcTest::i_isolate()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+ {
+ const char* source = "var myo = {if: \"foo\"}; myo.if;";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+
+ {
+ const char* source = "var myo = {\"bar\": \"foo\"}; myo[\"bar\"];";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+
+ {
+ const char* source = "var myo = {1: \"foo\"}; myo[1];";
+ v8::Local<v8::Value> result = PreCompileCompileRun(source);
+ CHECK(result->IsString());
+ v8::String::Utf8Value utf8(result);
+ CHECK_EQ("foo", *utf8);
+ }
+}
+
+namespace v8 {
+namespace internal {
+
+struct CompleteParserRecorderFriend {
+ static void FakeWritingSymbolIdInPreParseData(CompleteParserRecorder* log,
+ int number) {
+ log->WriteNumber(number);
+ if (log->symbol_id_ < number + 1) {
+ log->symbol_id_ = number + 1;
+ }
+ }
+ static int symbol_position(CompleteParserRecorder* log) {
+ return log->symbol_store_.size();
+ }
+ static int symbol_ids(CompleteParserRecorder* log) {
+ return log->symbol_id_;
+ }
+ static int function_position(CompleteParserRecorder* log) {
+ return log->function_store_.size();
+ }
+};
+
+}
+}
+
+
+TEST(StoringNumbersInPreParseData) {
+ // Symbol IDs are split into chunks of 7 bits for storing. This is a
+ // regression test for a bug where a symbol id was incorrectly stored if some
+ // of the chunks in the middle were all zeros.
+ typedef i::CompleteParserRecorderFriend F;
+ i::CompleteParserRecorder log;
+ for (int i = 0; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, 1 << i);
+ }
+ for (int i = 1; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, (1 << i) + 1);
+ }
+ for (int i = 6; i < 18; ++i) {
+ F::FakeWritingSymbolIdInPreParseData(&log, (3 << i) + (5 << (i - 6)));
+ }
+ i::Vector<unsigned> store = log.ExtractData();
+ i::ScriptDataImpl script_data(store);
+ script_data.Initialize();
+ // Check that we get the same symbols back.
+ for (int i = 0; i < 18; ++i) {
+ CHECK_EQ(1 << i, script_data.GetSymbolIdentifier());
+ }
+ for (int i = 1; i < 18; ++i) {
+ CHECK_EQ((1 << i) + 1, script_data.GetSymbolIdentifier());
+ }
+ for (int i = 6; i < 18; ++i) {
+ CHECK_EQ((3 << i) + (5 << (i - 6)), script_data.GetSymbolIdentifier());
+ }
+}
+
+
TEST(RegressChromium62639) {
v8::V8::Initialize();
i::Isolate* isolate = CcTest::i_isolate();
@@ -713,6 +907,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(re_source),
static_cast<unsigned>(strlen(re_source)));
+ i::HandleScope scope(CcTest::i_isolate());
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -720,8 +915,12 @@ void TestScanRegExp(const char* re_source, const char* expected) {
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
scanner.Next(); // Current token is now the regexp literal.
- CHECK(scanner.is_literal_ascii());
- i::Vector<const char> actual = scanner.literal_ascii_string();
+ i::Handle<i::String> val =
+ scanner.AllocateInternalizedString(CcTest::i_isolate());
+ i::DisallowHeapAllocation no_alloc;
+ i::String::FlatContent content = val->GetFlatContent();
+ CHECK(content.IsAscii());
+ i::Vector<const uint8_t> actual = content.ToOneByteVector();
for (int i = 0; i < actual.length(); i++) {
CHECK_NE('\0', expected[i]);
CHECK_EQ(expected[i], actual[i]);
@@ -828,6 +1027,8 @@ static int Utf8LengthHelper(const char* s) {
TEST(ScopePositions) {
+ v8::internal::FLAG_harmony_scoping = true;
+
// Test the parser for correctly setting the start and end positions
// of a scope. We check the scope positions of exactly one scope
// nested in the global scope of a program. 'inner source' is the
@@ -839,167 +1040,167 @@ TEST(ScopePositions) {
const char* inner_source;
const char* outer_suffix;
i::ScopeType scope_type;
- i::LanguageMode language_mode;
+ i::StrictMode strict_mode;
};
const SourceData source_data[] = {
- { " with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
- { " with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ { " with ({}) ", "{ block; }", " more;", i::WITH_SCOPE, i::SLOPPY },
+ { " with ({}) ", "{ block; }", "; more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({}) ", "{\n"
" block;\n"
" }", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
- { " with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
+ { " with ({}) ", "statement;", " more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({}) ", "statement", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
{ " with ({})\n"
" ", "statement;", "\n"
- " more;", i::WITH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::WITH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", " more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", "; more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) {\n"
" block;\n"
" }", "\n"
- " more;", i::CATCH_SCOPE, i::CLASSIC_MODE },
+ " more;", i::CATCH_SCOPE, i::SLOPPY },
{ " try {} catch ", "(e) { block; }", " finally { block; } more;",
- i::CATCH_SCOPE, i::CLASSIC_MODE },
+ i::CATCH_SCOPE, i::SLOPPY },
{ " start;\n"
- " ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " ", "{ let block; }", " more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
- " ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " ", "{ let block; }", "; more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
" ", "{\n"
" let block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " start;\n"
" function fun", "(a,b) { infunction; }", " more;",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " start;\n"
" function fun", "(a,b) {\n"
" infunction;\n"
" }", "\n"
- " more;", i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ " more;", i::FUNCTION_SCOPE, i::SLOPPY },
{ " (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) { block; }", "; more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) {\n"
" block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) statement;", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x = 1 ; x < 10; ++ x)\n"
" statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) { block; }", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) { block; }", "; more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) {\n"
" block;\n"
" }", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) statement;", " more;",
- i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {}) statement", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
{ " for ", "(let x in {})\n"
" statement;", "\n"
- " more;", i::BLOCK_SCOPE, i::EXTENDED_MODE },
+ " more;", i::BLOCK_SCOPE, i::STRICT },
// Check that 6-byte and 4-byte encodings of UTF-8 strings do not throw
// the preparser off in terms of byte offsets.
// 6 byte encoding.
{ " 'foo\355\240\201\355\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 4 byte encoding.
{ " 'foo\360\220\220\212';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 3 byte encoding of \u0fff.
{ " 'foo\340\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 6 byte encoding with missing last byte.
{ " 'foo\355\240\201\355\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u0fff with missing last byte.
{ " 'foo\340\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u0fff with missing 2 last bytes.
{ " 'foo\340';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u00ff should be a 2 byte encoding.
{ " 'foo\340\203\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 3 byte encoding of \u007f should be a 2 byte encoding.
{ " 'foo\340\201\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate.
{ " 'foo\355\240\201';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate where following code point is a 3 byte sequence.
{ " 'foo\355\240\201\340\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired lead surrogate where following code point is a 4 byte encoding
// of a trail surrogate.
{ " 'foo\355\240\201\360\215\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Unpaired trail surrogate.
{ " 'foo\355\260\211';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// 2 byte encoding of \u00ff.
{ " 'foo\303\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 2 byte encoding of \u00ff with missing last byte.
{ " 'foo\303';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Broken 2 byte encoding of \u007f should be a 1 byte encoding.
{ " 'foo\301\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 5 byte encoding.
{ " 'foo\370\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 6 byte encoding.
{ " 'foo\374\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 0xfe byte
{ " 'foo\376\277\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
// Illegal 0xff byte
{ " 'foo\377\277\277\277\277\277\277\277';\n"
" (function fun", "(a,b) { infunction; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " 'foo';\n"
" (function fun", "(a,b) { 'bar\355\240\201\355\260\213'; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
+ i::FUNCTION_SCOPE, i::SLOPPY },
{ " 'foo';\n"
" (function fun", "(a,b) { 'bar\360\220\220\214'; }", ")();",
- i::FUNCTION_SCOPE, i::CLASSIC_MODE },
- { NULL, NULL, NULL, i::EVAL_SCOPE, i::CLASSIC_MODE }
+ i::FUNCTION_SCOPE, i::SLOPPY },
+ { NULL, NULL, NULL, i::EVAL_SCOPE, i::SLOPPY }
};
i::Isolate* isolate = CcTest::i_isolate();
@@ -1038,7 +1239,7 @@ TEST(ScopePositions) {
parser.set_allow_lazy(true);
parser.set_allow_harmony_scoping(true);
info.MarkAsGlobal();
- info.SetLanguageMode(source_data[i].language_mode);
+ info.SetStrictMode(source_data[i].strict_mode);
parser.Parse();
CHECK(info.function() != NULL);
@@ -1071,7 +1272,7 @@ i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
i::JSArray::SetElement(
args_array, i, v8::Utils::OpenHandle(*v8::String::NewFromUtf8(
CcTest::isolate(), args[i])),
- NONE, i::kNonStrictMode);
+ NONE, i::SLOPPY);
}
i::Handle<i::JSObject> builtins(isolate->js_builtins_object());
i::Handle<i::Object> format_fun =
@@ -1108,8 +1309,9 @@ enum ParserSyncTestResult {
kError
};
-
-void SetParserFlags(i::ParserBase* parser, i::EnumSet<ParserFlag> flags) {
+template <typename Traits>
+void SetParserFlags(i::ParserBase<Traits>* parser,
+ i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
@@ -1379,7 +1581,9 @@ TEST(PreparserStrictOctal) {
void RunParserSyncTest(const char* context_data[][2],
const char* statement_data[],
- ParserSyncTestResult result) {
+ ParserSyncTestResult result,
+ const ParserFlag* flags = NULL,
+ int flags_len = 0) {
v8::HandleScope handles(CcTest::isolate());
v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
@@ -1388,10 +1592,14 @@ void RunParserSyncTest(const char* context_data[][2],
CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
- static const ParserFlag flags[] = {
+ static const ParserFlag default_flags[] = {
kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
- kAllowForOf
+ kAllowForOf, kAllowNativesSyntax
};
+ if (!flags) {
+ flags = default_flags;
+ flags_len = ARRAY_SIZE(default_flags);
+ }
for (int i = 0; context_data[i][0] != NULL; ++i) {
for (int j = 0; statement_data[j] != NULL; ++j) {
int kPrefixLen = i::StrLength(context_data[i][0]);
@@ -1409,7 +1617,7 @@ void RunParserSyncTest(const char* context_data[][2],
CHECK(length == kProgramSize);
TestParserSync(program.start(),
flags,
- ARRAY_SIZE(flags),
+ flags_len,
result);
}
}
@@ -1455,7 +1663,7 @@ TEST(ErrorsEvalAndArguments) {
}
-TEST(NoErrorsEvalAndArgumentsClassic) {
+TEST(NoErrorsEvalAndArgumentsSloppy) {
// Tests that both preparsing and parsing accept "eval" and "arguments" as
// identifiers when needed.
const char* context_data[][2] = {
@@ -1600,8 +1808,8 @@ TEST(ErrorsReservedWords) {
}
-TEST(NoErrorsYieldClassic) {
- // In classic mode, it's okay to use "yield" as identifier, *except* inside a
+TEST(NoErrorsYieldSloppy) {
+ // In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
// generator (see next test).
const char* context_data[][2] = {
{ "", "" },
@@ -1627,7 +1835,7 @@ TEST(NoErrorsYieldClassic) {
}
-TEST(ErrorsYieldClassicGenerator) {
+TEST(ErrorsYieldSloppyGenerator) {
const char* context_data[][2] = {
{ "function * is_gen() {", "}" },
{ NULL, NULL }
@@ -1743,7 +1951,7 @@ TEST(NoErrorsNameOfStrictFunction) {
-TEST(ErrorsIllegalWordsAsLabelsClassic) {
+TEST(ErrorsIllegalWordsAsLabelsSloppy) {
// Using future reserved words as labels is always an error.
const char* context_data[][2] = {
{ "", ""},
@@ -1880,7 +2088,6 @@ TEST(DontRegressPreParserDataSizes) {
// These tests make sure that PreParser doesn't start producing less data.
v8::V8::Initialize();
-
int marker;
CcTest::i_isolate()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
@@ -1890,9 +2097,18 @@ TEST(DontRegressPreParserDataSizes) {
int symbols;
int functions;
} test_cases[] = {
- // Labels, variables and functions are recorded as symbols.
+ // Labels and variables are recorded as symbols.
{"{label: 42}", 1, 0}, {"{label: 42; label2: 43}", 2, 0},
{"var x = 42;", 1, 0}, {"var x = 42, y = 43;", 2, 0},
+ {"var x = {y: 1};", 2, 0},
+ {"var x = {}; x.y = 1", 2, 0},
+ // "get" is recorded as a symbol too.
+ {"var x = {get foo(){} };", 3, 1},
+ // When keywords are used as identifiers, they're logged as symbols, too:
+ {"var x = {if: 1};", 2, 0},
+ {"var x = {}; x.if = 1", 2, 0},
+ {"var x = {get if(){} };", 3, 1},
+ // Functions
{"function foo() {}", 1, 1}, {"function foo() {} function bar() {}", 2, 2},
    // Labels, variables and functions inside lazy functions are not recorded.
{"function lazy() { var a, b, c; }", 1, 1},
@@ -1904,6 +2120,7 @@ TEST(DontRegressPreParserDataSizes) {
// Each function adds 5 elements to the preparse function data.
const int kDataPerFunction = 5;
+ typedef i::CompleteParserRecorderFriend F;
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; test_cases[i].program; i++) {
const char* program = test_cases[i].program;
@@ -1919,21 +2136,22 @@ TEST(DontRegressPreParserDataSizes) {
preparser.set_allow_natives_syntax(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- if (log.symbol_ids() != test_cases[i].symbols) {
+ if (F::symbol_ids(&log) != test_cases[i].symbols) {
i::OS::Print(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d symbols, however, received %d symbols.\n",
- program, test_cases[i].symbols, log.symbol_ids());
+ program, test_cases[i].symbols, F::symbol_ids(&log));
CHECK(false);
}
- if (log.function_position() != test_cases[i].functions * kDataPerFunction) {
+ if (F::function_position(&log) !=
+ test_cases[i].functions * kDataPerFunction) {
i::OS::Print(
"Expected preparse data for program:\n"
"\t%s\n"
"to contain %d functions, however, received %d functions.\n",
program, test_cases[i].functions,
- log.function_position() / kDataPerFunction);
+ F::function_position(&log) / kDataPerFunction);
CHECK(false);
}
i::ScriptDataImpl data(log.ExtractData());
@@ -2013,3 +2231,366 @@ TEST(NoErrorsTryCatchFinally) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
+
+
+TEST(ErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/unterminated",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsRegexpLiteral) {
+ const char* context_data[][2] = {
+ {"var r = ", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "/foo/",
+ "/foo/g",
+ "/foo/whatever", // This is an error but not detected by the parser.
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(Intrinsics) {
+ const char* context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "%someintrinsic(arg)",
+ NULL
+ };
+
+ // Parsing will fail or succeed depending on whether we allow natives syntax
+ // or not.
+ RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+}
+
+
+TEST(NoErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo",
+ "new foo();",
+ "new foo(1);",
+ "new foo(1, 2);",
+ // The first () will be processed as a part of the NewExpression and the
+ // second () will be processed as part of LeftHandSideExpression.
+ "new foo()();",
+ // The first () will be processed as a part of the inner NewExpression and
+ // the second () will be processed as a part of the outer NewExpression.
+ "new new foo()();",
+ "new foo.bar;",
+ "new foo.bar();",
+ "new foo.bar.baz;",
+ "new foo.bar().baz;",
+ "new foo[bar];",
+ "new foo[bar]();",
+ "new foo[bar][baz];",
+ "new foo[bar]()[baz];",
+ "new foo[bar].baz(baz)()[bar].baz;",
+ "new \"foo\"", // Runtime error
+ "new 1", // Runtime error
+ // This even runs:
+ "(new new Function(\"this.x = 1\")).x;",
+ "new new Test_Two(String, 2).v(0123).length;",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsNewExpression) {
+ const char* context_data[][2] = {
+ {"", ""},
+ {"var f =", ""},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "new foo bar",
+ "new ) foo",
+ "new ++foo",
+ "new foo ++",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(StrictObjectLiteralChecking) {
+ const char* strict_context_data[][2] = {
+ {"\"use strict\"; var myobject = {", "};"},
+ { NULL, NULL }
+ };
+ const char* non_strict_context_data[][2] = {
+ {"var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ // These are only errors in strict mode.
+ const char* statement_data[] = {
+ "foo: 1, foo: 2",
+ "\"foo\": 1, \"foo\": 2",
+ "foo: 1, \"foo\": 2",
+ "1: 1, 1: 2",
+ "1: 1, \"1\": 2",
+ "get: 1, get: 2", // Not a getter for real, just a property called get.
+ "set: 1, set: 2", // Not a setter for real, just a property called set.
+ NULL
+ };
+
+ RunParserSyncTest(non_strict_context_data, statement_data, kSuccess);
+ RunParserSyncTest(strict_context_data, statement_data, kError);
+}
+
+
+TEST(ErrorsObjectLiteralChecking) {
+ const char* context_data[][2] = {
+ {"\"use strict\"; var myobject = {", "};"},
+ {"var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "foo: 1, get foo() {}",
+ "foo: 1, set foo() {}",
+ "\"foo\": 1, get \"foo\"() {}",
+ "\"foo\": 1, set \"foo\"() {}",
+ "1: 1, get 1() {}",
+ "1: 1, set 1() {}",
+ // It's counter-intuitive, but these collide too (even in classic
+ // mode). Note that we can have "foo" and foo as properties in classic mode,
+ // but we cannot have "foo" and get foo, or foo and get "foo".
+ "foo: 1, get \"foo\"() {}",
+ "foo: 1, set \"foo\"() {}",
+ "\"foo\": 1, get foo() {}",
+ "\"foo\": 1, set foo() {}",
+ "1: 1, get \"1\"() {}",
+ "1: 1, set \"1\"() {}",
+ "\"1\": 1, get 1() {}"
+ "\"1\": 1, set 1() {}"
+ // Parsing FunctionLiteral for getter or setter fails
+ "get foo( +",
+ "get foo() \"error\"",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
+TEST(NoErrorsObjectLiteralChecking) {
+ const char* context_data[][2] = {
+ {"var myobject = {", "};"},
+ {"\"use strict\"; var myobject = {", "};"},
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "foo: 1, bar: 2",
+ "\"foo\": 1, \"bar\": 2",
+ "1: 1, 2: 2",
+ // Syntax: IdentifierName ':' AssignmentExpression
+ "foo: bar = 5 + baz",
+ // Syntax: 'get' (IdentifierName | String | Number) FunctionLiteral
+ "get foo() {}",
+ "get \"foo\"() {}",
+ "get 1() {}",
+ // Syntax: 'set' (IdentifierName | String | Number) FunctionLiteral
+ "set foo() {}",
+ "set \"foo\"() {}",
+ "set 1() {}",
+ // Non-colliding getters and setters -> no errors
+ "foo: 1, get bar() {}",
+ "foo: 1, set bar(b) {}",
+ "\"foo\": 1, get \"bar\"() {}",
+ "\"foo\": 1, set \"bar\"() {}",
+ "1: 1, get 2() {}",
+ "1: 1, set 2() {}",
+ // Weird number of parameters -> no errors
+ "get bar() {}, set bar() {}",
+ "get bar(x) {}, set bar(x) {}",
+ "get bar(x, y) {}, set bar(x, y) {}",
+ // Keywords, future reserved and strict future reserved are also allowed as
+ // property names.
+ "if: 4",
+ "interface: 5",
+ "super: 6",
+ "eval: 7",
+ "arguments: 8",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(TooManyArguments) {
+ const char* context_data[][2] = {
+ {"foo(", "0)"},
+ { NULL, NULL }
+ };
+
+ using v8::internal::Code;
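+  // Build Code::kMaxArguments copies of "0,"; together with the trailing "0)"
+  // from the context data, the call ends up with kMaxArguments + 1 arguments.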
+ char statement[Code::kMaxArguments * 2 + 1];
+ for (int i = 0; i < Code::kMaxArguments; ++i) {
+ statement[2 * i] = '0';
+ statement[2 * i + 1] = ',';
+ }
+ statement[Code::kMaxArguments * 2] = 0;
+
+ const char* statement_data[] = {
+ statement,
+ NULL
+ };
+
+ // The test is quite slow, so run it with a reduced set of flags.
+ static const ParserFlag empty_flags[] = {kAllowLazy};
+ RunParserSyncTest(context_data, statement_data, kError, empty_flags, 1);
+}
+
+
+TEST(StrictDelete) {
+ // "delete <Identifier>" is not allowed in strict mode.
+ const char* strict_context_data[][2] = {
+ {"\"use strict\"; ", ""},
+ { NULL, NULL }
+ };
+
+ const char* sloppy_context_data[][2] = {
+ {"", ""},
+ { NULL, NULL }
+ };
+
+  // These are errors in strict mode.
+ const char* sloppy_statement_data[] = {
+ "delete foo;",
+ "delete foo + 1;",
+ "delete (foo);",
+ "delete eval;",
+ "delete interface;",
+ NULL
+ };
+
+ // These are always OK
+ const char* good_statement_data[] = {
+ "delete this;",
+ "delete 1;",
+ "delete 1 + 2;",
+ "delete foo();",
+ "delete foo.bar;",
+ "delete foo[bar];",
+ "delete foo--;",
+ "delete --foo;",
+ "delete new foo();",
+ "delete new foo(bar);",
+ NULL
+ };
+
+ // These are always errors
+ const char* bad_statement_data[] = {
+ "delete if;",
+ NULL
+ };
+
+ RunParserSyncTest(strict_context_data, sloppy_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, sloppy_statement_data, kSuccess);
+
+ RunParserSyncTest(strict_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(sloppy_context_data, good_statement_data, kSuccess);
+
+ RunParserSyncTest(strict_context_data, bad_statement_data, kError);
+ RunParserSyncTest(sloppy_context_data, bad_statement_data, kError);
+}
+
+
+TEST(InvalidLeftHandSide) {
+ const char* assignment_context_data[][2] = {
+ {"", " = 1;"},
+ {"\"use strict\"; ", " = 1;"},
+ { NULL, NULL }
+ };
+
+ const char* prefix_context_data[][2] = {
+ {"++", ";"},
+ {"\"use strict\"; ++", ";"},
+ {NULL, NULL},
+ };
+
+ const char* postfix_context_data[][2] = {
+ {"", "++;"},
+ {"\"use strict\"; ", "++;"},
+ { NULL, NULL }
+ };
+
+  // Good left hand sides for assignment or prefix / postfix operations.
+ const char* good_statement_data[] = {
+ "foo",
+ "foo.bar",
+ "foo[bar]",
+ "foo()[bar]",
+ "foo().bar",
+ "this.foo",
+ "this[foo]",
+ "new foo()[bar]",
+ "new foo().bar",
+ NULL
+ };
+
+  // Bad left hand sides for assignment or prefix / postfix operations.
+ const char* bad_statement_data_common[] = {
+ "2",
+ "foo()",
+ "null",
+ "if", // Unexpected token
+ "{x: 1}", // Unexpected token
+ "this",
+ "\"bar\"",
+ "(foo + bar)",
+ "new new foo()[bar]", // means: new (new foo()[bar])
+ "new new foo().bar", // means: new (new foo()[bar])
+ NULL
+ };
+
+ // These are not okay for assignment, but okay for prefix / postix.
+ const char* bad_statement_data_for_assignment[] = {
+ "++foo",
+ "foo++",
+ "foo + bar",
+ NULL
+ };
+
+ RunParserSyncTest(assignment_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(assignment_context_data, bad_statement_data_common, kError);
+ RunParserSyncTest(assignment_context_data, bad_statement_data_for_assignment,
+ kError);
+
+ RunParserSyncTest(prefix_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(prefix_context_data, bad_statement_data_common, kError);
+
+ RunParserSyncTest(postfix_context_data, good_statement_data, kSuccess);
+ RunParserSyncTest(postfix_context_data, bad_statement_data_common, kError);
+}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index eca0ab72a..b9f8bafe4 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -53,6 +53,12 @@ using namespace ::v8::internal;
do { \
ASM("str %%sp, %0" : "=g" (sp_addr)); \
} while (0)
+#elif defined(__AARCH64EL__)
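+// The stack pointer cannot be used directly as the source register of str on
+// ARM64, so it is staged through x16 first.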
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
+ } while (0)
#elif defined(__MIPSEL__)
#define GET_STACK_POINTER() \
static int sp_addr = 0; \
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index d0193520f..712fec056 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -49,6 +49,11 @@
#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
+#if V8_TARGET_ARCH_ARM64
+#include "arm64/assembler-arm64.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/regexp-macro-assembler-arm64.h"
+#endif
#if V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
#include "mips/macro-assembler-mips.h"
@@ -444,27 +449,15 @@ static bool NotDigit(uc16 c) {
}
-static bool IsWhiteSpace(uc16 c) {
- switch (c) {
- case 0x09:
- case 0x0A:
- case 0x0B:
- case 0x0C:
- case 0x0d:
- case 0x20:
- case 0xA0:
- case 0x2028:
- case 0x2029:
- case 0xFEFF:
- return true;
- default:
- return unibrow::Space::Is(c);
- }
+static bool IsWhiteSpaceOrLineTerminator(uc16 c) {
+ // According to ECMA 5.1, 15.10.2.12 the CharacterClassEscape \s includes
+ // WhiteSpace (7.2) and LineTerminator (7.3) values.
+ return v8::internal::WhiteSpaceOrLineTerminator::Is(c);
}
-static bool NotWhiteSpace(uc16 c) {
- return !IsWhiteSpace(c);
+static bool NotWhiteSpaceNorLineTerminator(uc16 c) {
+ return !IsWhiteSpaceOrLineTerminator(c);
}
@@ -494,8 +487,8 @@ TEST(CharacterClassEscapes) {
TestCharacterClassEscapes('.', IsRegExpNewline);
TestCharacterClassEscapes('d', IsDigit);
TestCharacterClassEscapes('D', NotDigit);
- TestCharacterClassEscapes('s', IsWhiteSpace);
- TestCharacterClassEscapes('S', NotWhiteSpace);
+ TestCharacterClassEscapes('s', IsWhiteSpaceOrLineTerminator);
+  TestCharacterClassEscapes('S', NotWhiteSpaceNorLineTerminator);
TestCharacterClassEscapes('w', IsRegExpWord);
TestCharacterClassEscapes('W', NotWord);
}
@@ -701,6 +694,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_ARM64
+typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
#endif
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 4b31e614d..6ff52003b 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -661,7 +661,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
for (int i = 0; i < test_cases; i++) {
printf("%d\n", i);
HandleScope inner_scope(isolate);
- AlwaysAllocateScope always_allocate;
+ AlwaysAllocateScope always_allocate(isolate);
// Build flat version of cons string.
Handle<String> flat_string = build(i, &data);
ConsStringStats flat_string_stats;
@@ -1209,24 +1209,17 @@ TEST(AsciiArrayJoin) {
// starting with 'bad', followed by 2^14 times the string s. That means the
// total length of the concatenated strings is 2^31 + 3. So on 32bit systems
// summing the lengths of the strings (as Smis) overflows and wraps.
- static const char* join_causing_out_of_memory =
+ LocalContext context;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::TryCatch try_catch;
+ CHECK(CompileRun(
"var two_14 = Math.pow(2, 14);"
"var two_17 = Math.pow(2, 17);"
"var s = Array(two_17 + 1).join('c');"
"var a = ['bad'];"
"for (var i = 1; i <= two_14; i++) a.push(s);"
- "a.join("");";
-
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- v8::V8::IgnoreOutOfMemoryException();
- v8::Local<v8::Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(CcTest::isolate(), join_causing_out_of_memory));
- v8::Local<v8::Value> result = script->Run();
-
- // Check for out of memory state.
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
+ "a.join("");").IsEmpty());
+ CHECK(try_catch.HasCaught());
}
@@ -1282,23 +1275,6 @@ TEST(RobustSubStringStub) {
}
-TEST(RegExpOverflow) {
- // Result string has the length 2^32, causing a 32-bit integer overflow.
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- LocalContext context;
- v8::V8::IgnoreOutOfMemoryException();
- v8::Local<v8::Value> result = CompileRun(
- "var a = 'a'; "
- "for (var i = 0; i < 16; i++) { "
- " a += a; "
- "} "
- "a.replace(/a/g, a); ");
- CHECK(result.IsEmpty());
- CHECK(context->HasOutOfMemoryException());
-}
-
-
TEST(StringReplaceAtomTwoByteResult) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1376,3 +1352,62 @@ TEST(Latin1IgnoreCase) {
CHECK_EQ(Min(upper, lower), test);
}
}
+
+
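+// Dummy resources that report a length far larger than any valid V8 string,
+// so attempts to create external strings from them are expected to fail.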
+class DummyResource: public v8::String::ExternalStringResource {
+ public:
+ virtual const uint16_t* data() const { return NULL; }
+ virtual size_t length() const { return 1 << 30; }
+};
+
+
+class DummyOneByteResource: public v8::String::ExternalOneByteStringResource {
+ public:
+ virtual const char* data() const { return NULL; }
+ virtual size_t length() const { return 1 << 30; }
+};
+
+
+TEST(InvalidExternalString) {
+ CcTest::InitializeVM();
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ { HandleScope scope(isolate);
+ DummyOneByteResource r;
+ CHECK(isolate->factory()->NewExternalStringFromAscii(&r).is_null());
+ CHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ }
+
+ { HandleScope scope(isolate);
+ DummyResource r;
+ CHECK(isolate->factory()->NewExternalStringFromTwoByte(&r).is_null());
+ CHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ }
+}
+
+
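+// Each instantiation requests a string of String::kMaxLength + 1 characters
+// and checks that the factory method fails with a pending exception instead
+// of succeeding.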
+#define INVALID_STRING_TEST(FUN, TYPE) \
+ TEST(StringOOM##FUN) { \
+ CcTest::InitializeVM(); \
+ LocalContext context; \
+ Isolate* isolate = CcTest::i_isolate(); \
+ STATIC_ASSERT(String::kMaxLength < kMaxInt); \
+ static const int invalid = String::kMaxLength + 1; \
+ HandleScope scope(isolate); \
+ Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
+ CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
+ memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
+ CHECK(isolate->has_pending_exception()); \
+ isolate->clear_pending_exception(); \
+ dummy.Dispose(); \
+ }
+
+INVALID_STRING_TEST(NewStringFromAscii, char)
+INVALID_STRING_TEST(NewStringFromUtf8, char)
+INVALID_STRING_TEST(NewStringFromOneByte, uint8_t)
+INVALID_STRING_TEST(InternalizeOneByteString, uint8_t)
+INVALID_STRING_TEST(InternalizeUtf8String, char)
+
+#undef INVALID_STRING_TEST
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index a04ffa70c..6fceea613 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -37,7 +37,7 @@ TEST(Create) {
#endif
}
- CcTest::heap()->PerformScavenge();
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
// All symbols should be distinct.
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index d29ee4110..326bd1b56 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -34,6 +34,8 @@ template<class Type, class TypeHandle, class Region>
class Types {
public:
Types(Region* region, Isolate* isolate) :
+ Representation(Type::Representation(region)),
+ Semantic(Type::Semantic(region)),
None(Type::None(region)),
Any(Type::Any(region)),
Oddball(Type::Oddball(region)),
@@ -41,9 +43,9 @@ class Types {
Null(Type::Null(region)),
Undefined(Type::Undefined(region)),
Number(Type::Number(region)),
- Smi(Type::Smi(region)),
+ SignedSmall(Type::SignedSmall(region)),
Signed32(Type::Signed32(region)),
- Double(Type::Double(region)),
+ Float(Type::Float(region)),
Name(Type::Name(region)),
UniqueName(Type::UniqueName(region)),
String(Type::String(region)),
@@ -72,6 +74,8 @@ class Types {
ArrayConstant2 = Type::Constant(array, region);
}
+ TypeHandle Representation;
+ TypeHandle Semantic;
TypeHandle None;
TypeHandle Any;
TypeHandle Oddball;
@@ -79,9 +83,9 @@ class Types {
TypeHandle Null;
TypeHandle Undefined;
TypeHandle Number;
- TypeHandle Smi;
+ TypeHandle SignedSmall;
TypeHandle Signed32;
- TypeHandle Double;
+ TypeHandle Float;
TypeHandle Name;
TypeHandle UniqueName;
TypeHandle String;
@@ -190,10 +194,10 @@ struct ZoneRep {
return static_cast<int>(reinterpret_cast<intptr_t>(t) >> 1);
}
static Map* AsClass(Type* t) {
- return *reinterpret_cast<Map**>(AsTagged(t)->at(1));
+ return *reinterpret_cast<Map**>(AsTagged(t)->at(2));
}
static Object* AsConstant(Type* t) {
- return *reinterpret_cast<Object**>(AsTagged(t)->at(1));
+ return *reinterpret_cast<Object**>(AsTagged(t)->at(2));
}
static ZoneList<Type*>* AsUnion(Type* t) {
return reinterpret_cast<ZoneList<Type*>*>(AsTagged(t));
@@ -236,7 +240,7 @@ struct Tests : Rep {
T(Rep::ToRegion(&zone, isolate), isolate) {
}
- static void CheckEqual(TypeHandle type1, TypeHandle type2) {
+ void CheckEqual(TypeHandle type1, TypeHandle type2) {
CHECK_EQ(Rep::IsBitset(type1), Rep::IsBitset(type2));
CHECK_EQ(Rep::IsClass(type1), Rep::IsClass(type2));
CHECK_EQ(Rep::IsConstant(type1), Rep::IsConstant(type2));
@@ -256,7 +260,7 @@ struct Tests : Rep {
CHECK(type2->Is(type1));
}
- static void CheckSub(TypeHandle type1, TypeHandle type2) {
+ void CheckSub(TypeHandle type1, TypeHandle type2) {
CHECK(type1->Is(type2));
CHECK(!type2->Is(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
@@ -264,7 +268,7 @@ struct Tests : Rep {
}
}
- static void CheckUnordered(TypeHandle type1, TypeHandle type2) {
+ void CheckUnordered(TypeHandle type1, TypeHandle type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
@@ -272,21 +276,23 @@ struct Tests : Rep {
}
}
- static void CheckOverlap(TypeHandle type1, TypeHandle type2) {
+ void CheckOverlap(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
CHECK(type1->Maybe(type2));
CHECK(type2->Maybe(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_NE(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ CHECK_NE(0,
+ Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
}
}
- static void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
+ void CheckDisjoint(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
CHECK(!type1->Maybe(type2));
CHECK(!type2->Maybe(type1));
if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
- CHECK_EQ(0, Rep::AsBitset(type1) & Rep::AsBitset(type2));
+ CHECK_EQ(0,
+ Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
}
}
@@ -300,10 +306,12 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Union(T.String, T.Receiver)));
CHECK_EQ(0, this->AsBitset(T.None));
- CHECK_EQ(this->AsBitset(T.Number) | this->AsBitset(T.String),
- this->AsBitset(T.Union(T.String, T.Number)));
- CHECK_EQ(this->AsBitset(T.Receiver),
- this->AsBitset(T.Union(T.Receiver, T.Object)));
+ CHECK_EQ(
+ this->AsBitset(T.Number) | this->AsBitset(T.String),
+ this->AsBitset(T.Union(T.String, T.Number)));
+ CHECK_EQ(
+ this->AsBitset(T.Receiver),
+ this->AsBitset(T.Union(T.Receiver, T.Object)));
}
void Class() {
@@ -352,12 +360,12 @@ struct Tests : Rep {
CheckUnordered(T.Boolean, T.Undefined);
CheckSub(T.Number, T.Any);
- CheckSub(T.Smi, T.Number);
+ CheckSub(T.SignedSmall, T.Number);
CheckSub(T.Signed32, T.Number);
- CheckSub(T.Double, T.Number);
- CheckSub(T.Smi, T.Signed32);
- CheckUnordered(T.Smi, T.Double);
- CheckUnordered(T.Signed32, T.Double);
+ CheckSub(T.Float, T.Number);
+ CheckSub(T.SignedSmall, T.Signed32);
+ CheckUnordered(T.SignedSmall, T.Float);
+ CheckUnordered(T.Signed32, T.Float);
CheckSub(T.Name, T.Any);
CheckSub(T.UniqueName, T.Any);
@@ -391,7 +399,7 @@ struct Tests : Rep {
CheckSub(T.ArrayClass, T.Object);
CheckUnordered(T.ObjectClass, T.ArrayClass);
- CheckSub(T.SmiConstant, T.Smi);
+ CheckSub(T.SmiConstant, T.SignedSmall);
CheckSub(T.SmiConstant, T.Signed32);
CheckSub(T.SmiConstant, T.Number);
CheckSub(T.ObjectConstant1, T.Object);
@@ -409,71 +417,71 @@ struct Tests : Rep {
}
void Maybe() {
- CheckOverlap(T.Any, T.Any);
- CheckOverlap(T.Object, T.Object);
-
- CheckOverlap(T.Oddball, T.Any);
- CheckOverlap(T.Boolean, T.Oddball);
- CheckOverlap(T.Null, T.Oddball);
- CheckOverlap(T.Undefined, T.Oddball);
- CheckDisjoint(T.Boolean, T.Null);
- CheckDisjoint(T.Undefined, T.Null);
- CheckDisjoint(T.Boolean, T.Undefined);
-
- CheckOverlap(T.Number, T.Any);
- CheckOverlap(T.Smi, T.Number);
- CheckOverlap(T.Double, T.Number);
- CheckDisjoint(T.Signed32, T.Double);
-
- CheckOverlap(T.Name, T.Any);
- CheckOverlap(T.UniqueName, T.Any);
- CheckOverlap(T.UniqueName, T.Name);
- CheckOverlap(T.String, T.Name);
- CheckOverlap(T.InternalizedString, T.String);
- CheckOverlap(T.InternalizedString, T.UniqueName);
- CheckOverlap(T.InternalizedString, T.Name);
- CheckOverlap(T.Symbol, T.UniqueName);
- CheckOverlap(T.Symbol, T.Name);
- CheckOverlap(T.String, T.UniqueName);
- CheckDisjoint(T.String, T.Symbol);
- CheckDisjoint(T.InternalizedString, T.Symbol);
-
- CheckOverlap(T.Receiver, T.Any);
- CheckOverlap(T.Object, T.Any);
- CheckOverlap(T.Object, T.Receiver);
- CheckOverlap(T.Array, T.Object);
- CheckOverlap(T.Function, T.Object);
- CheckOverlap(T.Proxy, T.Receiver);
- CheckDisjoint(T.Object, T.Proxy);
- CheckDisjoint(T.Array, T.Function);
-
- CheckOverlap(T.ObjectClass, T.Any);
- CheckOverlap(T.ObjectConstant1, T.Any);
-
- CheckOverlap(T.ObjectClass, T.Object);
- CheckOverlap(T.ArrayClass, T.Object);
- CheckOverlap(T.ObjectClass, T.ObjectClass);
- CheckOverlap(T.ArrayClass, T.ArrayClass);
- CheckDisjoint(T.ObjectClass, T.ArrayClass);
-
- CheckOverlap(T.SmiConstant, T.Smi);
- CheckOverlap(T.SmiConstant, T.Signed32);
- CheckOverlap(T.SmiConstant, T.Number);
- CheckDisjoint(T.SmiConstant, T.Double);
- CheckOverlap(T.ObjectConstant1, T.Object);
- CheckOverlap(T.ObjectConstant2, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Object);
- CheckOverlap(T.ArrayConstant1, T.Array);
- CheckOverlap(T.ArrayConstant1, T.ArrayConstant2);
- CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
- CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
- CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1);
-
- CheckDisjoint(T.ObjectConstant1, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant2, T.ObjectClass);
- CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
- CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
- CheckDisjoint(T.ArrayConstant1, T.ObjectClass);
+ CheckOverlap(T.Any, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Object, T.Semantic);
+
+ CheckOverlap(T.Oddball, T.Any, T.Semantic);
+ CheckOverlap(T.Boolean, T.Oddball, T.Semantic);
+ CheckOverlap(T.Null, T.Oddball, T.Semantic);
+ CheckOverlap(T.Undefined, T.Oddball, T.Semantic);
+ CheckDisjoint(T.Boolean, T.Null, T.Semantic);
+ CheckDisjoint(T.Undefined, T.Null, T.Semantic);
+ CheckDisjoint(T.Boolean, T.Undefined, T.Semantic);
+
+ CheckOverlap(T.Number, T.Any, T.Semantic);
+ CheckOverlap(T.SignedSmall, T.Number, T.Semantic);
+ CheckOverlap(T.Float, T.Number, T.Semantic);
+ CheckDisjoint(T.Signed32, T.Float, T.Semantic);
+
+ CheckOverlap(T.Name, T.Any, T.Semantic);
+ CheckOverlap(T.UniqueName, T.Any, T.Semantic);
+ CheckOverlap(T.UniqueName, T.Name, T.Semantic);
+ CheckOverlap(T.String, T.Name, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.String, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.UniqueName, T.Semantic);
+ CheckOverlap(T.InternalizedString, T.Name, T.Semantic);
+ CheckOverlap(T.Symbol, T.UniqueName, T.Semantic);
+ CheckOverlap(T.Symbol, T.Name, T.Semantic);
+ CheckOverlap(T.String, T.UniqueName, T.Semantic);
+ CheckDisjoint(T.String, T.Symbol, T.Semantic);
+ CheckDisjoint(T.InternalizedString, T.Symbol, T.Semantic);
+
+ CheckOverlap(T.Receiver, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Any, T.Semantic);
+ CheckOverlap(T.Object, T.Receiver, T.Semantic);
+ CheckOverlap(T.Array, T.Object, T.Semantic);
+ CheckOverlap(T.Function, T.Object, T.Semantic);
+ CheckOverlap(T.Proxy, T.Receiver, T.Semantic);
+ CheckDisjoint(T.Object, T.Proxy, T.Semantic);
+ CheckDisjoint(T.Array, T.Function, T.Semantic);
+
+ CheckOverlap(T.ObjectClass, T.Any, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.Any, T.Semantic);
+
+ CheckOverlap(T.ObjectClass, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayClass, T.Object, T.Semantic);
+ CheckOverlap(T.ObjectClass, T.ObjectClass, T.Semantic);
+ CheckOverlap(T.ArrayClass, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ObjectClass, T.ArrayClass, T.Semantic);
+
+ CheckOverlap(T.SmiConstant, T.SignedSmall, T.Semantic);
+ CheckOverlap(T.SmiConstant, T.Signed32, T.Semantic);
+ CheckOverlap(T.SmiConstant, T.Number, T.Semantic);
+ CheckDisjoint(T.SmiConstant, T.Float, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.Object, T.Semantic);
+ CheckOverlap(T.ObjectConstant2, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.Object, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.Array, T.Semantic);
+ CheckOverlap(T.ArrayConstant1, T.ArrayConstant2, T.Semantic);
+ CheckOverlap(T.ObjectConstant1, T.ObjectConstant1, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayConstant1, T.Semantic);
+
+ CheckDisjoint(T.ObjectConstant1, T.ObjectClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant2, T.ObjectClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant1, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ObjectConstant2, T.ArrayClass, T.Semantic);
+ CheckDisjoint(T.ArrayConstant1, T.ObjectClass, T.Semantic);
}
void Union() {
@@ -498,8 +506,8 @@ struct Tests : Rep {
CheckSub(T.ArrayClass, T.Union(T.ObjectClass, T.ArrayClass));
CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
- CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
+ CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number, T.Semantic);
// Constant-constant
CHECK(this->IsConstant(T.Union(T.ObjectConstant1, T.ObjectConstant1)));
@@ -520,11 +528,16 @@ struct Tests : Rep {
CheckUnordered(
T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array);
CheckOverlap(
- T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass);
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Array, T.Semantic);
+ CheckOverlap(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ArrayConstant2,
+ T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.Number, T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayConstant1), T.ObjectClass,
+ T.Semantic);
// Bitset-class
CHECK(this->IsBitset(T.Union(T.ObjectClass, T.Object)));
@@ -533,11 +546,12 @@ struct Tests : Rep {
CheckEqual(T.Union(T.ObjectClass, T.Object), T.Object);
CheckSub(T.None, T.Union(T.ObjectClass, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Number), T.Any);
- CheckSub(T.Union(T.ObjectClass, T.Smi), T.Union(T.Object, T.Number));
+ CheckSub(
+ T.Union(T.ObjectClass, T.SignedSmall), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
+ CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number, T.Semantic);
// Bitset-constant
CHECK(this->IsBitset(T.Union(T.SmiConstant, T.Number)));
@@ -552,8 +566,8 @@ struct Tests : Rep {
T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
- CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
+ CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object, T.Semantic);
+ CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number, T.Semantic);
CheckEqual(T.Union(T.Signed32, T.Signed32Constant), T.Signed32);
// Class-constant
@@ -569,8 +583,11 @@ struct Tests : Rep {
CheckSub(
T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant1);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
- CheckDisjoint(T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2,
+ T.Semantic);
+ CheckDisjoint(
+ T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass, T.Semantic);
// Bitset-union
CHECK(this->IsBitset(
@@ -585,19 +602,19 @@ struct Tests : Rep {
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number),
T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
CheckSub(
- T.Double,
+ T.Float,
T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Number));
CheckSub(
T.ObjectConstant1,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float));
CheckSub(
T.None,
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double));
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float));
CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float),
T.Any);
CheckSub(
- T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Double),
+ T.Union(T.Union(T.ArrayClass, T.ObjectConstant1), T.Float),
T.Union(T.ObjectConstant1, T.Union(T.Number, T.ArrayClass)));
// Class-union
@@ -661,7 +678,9 @@ struct Tests : Rep {
T.Union(T.ObjectConstant1, T.ObjectConstant2),
T.ArrayConstant1));
CheckEqual(
- T.Union(T.Union(T.Number, T.ArrayClass), T.Union(T.Smi, T.Array)),
+ T.Union(
+ T.Union(T.Number, T.ArrayClass),
+ T.Union(T.SignedSmall, T.Array)),
T.Union(T.Number, T.Array));
}
@@ -672,7 +691,7 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Intersect(T.Any, T.None)));
CheckEqual(T.Intersect(T.None, T.Number), T.None);
- CheckEqual(T.Intersect(T.Object, T.Proxy), T.None);
+ CheckSub(T.Intersect(T.Object, T.Proxy), T.Representation);
CheckEqual(T.Intersect(T.Name, T.String), T.Intersect(T.String, T.Name));
CheckEqual(T.Intersect(T.UniqueName, T.String), T.InternalizedString);
@@ -699,15 +718,15 @@ struct Tests : Rep {
CHECK(this->IsBitset(T.Intersect(T.ObjectClass, T.Number)));
CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
- CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
- CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
+ CheckSub(T.Intersect(T.ObjectClass, T.Array), T.Representation);
+ CheckSub(T.Intersect(T.ObjectClass, T.Number), T.Representation);
// Bitset-constant
- CHECK(this->IsBitset(T.Intersect(T.Smi, T.Number)));
+ CHECK(this->IsBitset(T.Intersect(T.SignedSmall, T.Number)));
CHECK(this->IsConstant(T.Intersect(T.SmiConstant, T.Number)));
CHECK(this->IsConstant(T.Intersect(T.ObjectConstant1, T.Object)));
- CheckEqual(T.Intersect(T.Smi, T.Number), T.Smi);
+ CheckEqual(T.Intersect(T.SignedSmall, T.Number), T.SignedSmall);
CheckEqual(T.Intersect(T.SmiConstant, T.Number), T.SmiConstant);
CheckEqual(T.Intersect(T.ObjectConstant1, T.Object), T.ObjectConstant1);
@@ -778,8 +797,8 @@ struct Tests : Rep {
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ArrayClass),
- T.Union(T.Smi, T.Array)),
- T.Union(T.Smi, T.ArrayClass));
+ T.Union(T.SignedSmall, T.Array)),
+ T.Union(T.SignedSmall, T.ArrayClass));
CheckEqual(
T.Intersect(
T.Union(T.Number, T.ObjectClass),
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
new file mode 100644
index 000000000..9eb32b002
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -0,0 +1,425 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+#include "test-utils-arm64.h"
+
+using namespace v8::internal;
+
+
+#define __ masm->
+
+
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
+ if (result != expected) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
+ if (result != expected) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ expected, result);
+ }
+
+ return expected == result;
+}
+
+
+bool EqualFP32(float expected, const RegisterDump*, float result) {
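+  // Compare raw bit patterns so that NaNs and signed zeroes are matched
+  // exactly instead of relying on floating-point equality.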
+ if (float_to_rawbits(expected) == float_to_rawbits(result)) {
+ return true;
+ } else {
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
+ float_to_rawbits(expected), float_to_rawbits(result));
+ } else {
+ printf("Expected %.9f (0x%08" PRIx32 ")\t "
+ "Found %.9f (0x%08" PRIx32 ")\n",
+ expected, float_to_rawbits(expected),
+ result, float_to_rawbits(result));
+ }
+ return false;
+ }
+}
+
+
+bool EqualFP64(double expected, const RegisterDump*, double result) {
+ if (double_to_rawbits(expected) == double_to_rawbits(result)) {
+ return true;
+ }
+
+ if (std::isnan(expected) || (expected == 0.0)) {
+ printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ double_to_rawbits(expected), double_to_rawbits(result));
+ } else {
+ printf("Expected %.17f (0x%016" PRIx64 ")\t "
+ "Found %.17f (0x%016" PRIx64 ")\n",
+ expected, double_to_rawbits(expected),
+ result, double_to_rawbits(result));
+ }
+ return false;
+}
+
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
+ ASSERT(reg.Is32Bits());
+ // Retrieve the corresponding X register so we can check that the upper part
+ // was properly cleared.
+ int64_t result_x = core->xreg(reg.code());
+ if ((result_x & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
+ expected, result_x);
+ return false;
+ }
+ uint32_t result_w = core->wreg(reg.code());
+ return Equal32(expected, core, result_w);
+}
+
+
+bool Equal64(uint64_t expected,
+ const RegisterDump* core,
+ const Register& reg) {
+ ASSERT(reg.Is64Bits());
+ uint64_t result = core->xreg(reg.code());
+ return Equal64(expected, core, result);
+}
+
+
+bool EqualFP32(float expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is32Bits());
+ // Retrieve the corresponding D register so we can check that the upper part
+ // was properly cleared.
+ uint64_t result_64 = core->dreg_bits(fpreg.code());
+ if ((result_64 & 0xffffffff00000000L) != 0) {
+ printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
+ float_to_rawbits(expected), expected, result_64);
+ return false;
+ }
+
+ return EqualFP32(expected, core, core->sreg(fpreg.code()));
+}
+
+
+bool EqualFP64(double expected,
+ const RegisterDump* core,
+ const FPRegister& fpreg) {
+ ASSERT(fpreg.Is64Bits());
+ return EqualFP64(expected, core, core->dreg(fpreg.code()));
+}
+
+
+bool Equal64(const Register& reg0,
+ const RegisterDump* core,
+ const Register& reg1) {
+ ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
+ int64_t expected = core->xreg(reg0.code());
+ int64_t result = core->xreg(reg1.code());
+ return Equal64(expected, core, result);
+}
+
+
+static char FlagN(uint32_t flags) {
+ return (flags & NFlag) ? 'N' : 'n';
+}
+
+
+static char FlagZ(uint32_t flags) {
+ return (flags & ZFlag) ? 'Z' : 'z';
+}
+
+
+static char FlagC(uint32_t flags) {
+ return (flags & CFlag) ? 'C' : 'c';
+}
+
+
+static char FlagV(uint32_t flags) {
+ return (flags & VFlag) ? 'V' : 'v';
+}
+
+
+bool EqualNzcv(uint32_t expected, uint32_t result) {
+ ASSERT((expected & ~NZCVFlag) == 0);
+ ASSERT((result & ~NZCVFlag) == 0);
+ if (result != expected) {
+ printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
+ FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
+ FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
+ return false;
+ }
+
+ return true;
+}
+
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (a->xreg(i) != b->xreg(i)) {
+ printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a->xreg(i), b->xreg(i));
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ uint64_t a_bits = a->dreg_bits(i);
+ uint64_t b_bits = b->dreg_bits(i);
+ if (a_bits != b_bits) {
+ printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
+ i, a_bits, b_bits);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+ // Only assign allowed registers.
+ if (r) {
+ r[i] = Register::Create(n, reg_size);
+ }
+ if (x) {
+ x[i] = Register::Create(n, kXRegSizeInBits);
+ }
+ if (w) {
+ w[i] = Register::Create(n, kWRegSizeInBits);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
+
+ return list;
+}
+
+
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed) {
+ RegList list = 0;
+ int i = 0;
+ for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
+ if (((1UL << n) & allowed) != 0) {
+      // Only assign allowed registers.
+ if (v) {
+ v[i] = FPRegister::Create(n, reg_size);
+ }
+ if (d) {
+ d[i] = FPRegister::Create(n, kDRegSizeInBits);
+ }
+ if (s) {
+ s[i] = FPRegister::Create(n, kSRegSizeInBits);
+ }
+ list |= (1UL << n);
+ i++;
+ }
+ }
+ // Check that we got enough registers.
+ ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+
+ return list;
+}
+
+
+void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
+ Register first = NoReg;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ Register xn = Register::Create(i, kXRegSizeInBits);
+ // We should never write into csp here.
+ ASSERT(!xn.Is(csp));
+ if (!xn.IsZero()) {
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Mov(xn, value);
+ first = xn;
+ } else {
+ // We've already loaded the literal, so re-use the value already
+ // loaded into the first register we hit.
+ __ Mov(xn, first);
+ }
+ }
+ }
+ }
+}
+
+
+void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
+ FPRegister first = NoFPReg;
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ if (reg_list & (1UL << i)) {
+ FPRegister dn = FPRegister::Create(i, kDRegSizeInBits);
+ if (!first.IsValid()) {
+ // This is the first register we've hit, so construct the literal.
+ __ Fmov(dn, value);
+ first = dn;
+ } else {
+ // We've already loaded the literal, so re-use the value already loaded
+ // into the first register we hit.
+ __ Fmov(dn, first);
+ }
+ }
+ }
+}
+
+
+void Clobber(MacroAssembler* masm, CPURegList reg_list) {
+ if (reg_list.type() == CPURegister::kRegister) {
+ // This will always clobber X registers.
+ Clobber(masm, reg_list.list());
+ } else if (reg_list.type() == CPURegister::kFPRegister) {
+ // This will always clobber D registers.
+ ClobberFP(masm, reg_list.list());
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void RegisterDump::Dump(MacroAssembler* masm) {
+ ASSERT(__ StackPointer().Is(csp));
+
+ // Ensure that we don't unintentionally clobber any registers.
+ RegList old_tmp_list = masm->TmpList()->list();
+ RegList old_fptmp_list = masm->FPTmpList()->list();
+ masm->TmpList()->set_list(0);
+ masm->FPTmpList()->set_list(0);
+
+ // Preserve some temporary registers.
+ Register dump_base = x0;
+ Register dump = x1;
+ Register tmp = x2;
+ Register dump_base_w = dump_base.W();
+ Register dump_w = dump.W();
+ Register tmp_w = tmp.W();
+
+ // Offsets into the dump_ structure.
+ const int x_offset = offsetof(dump_t, x_);
+ const int w_offset = offsetof(dump_t, w_);
+ const int d_offset = offsetof(dump_t, d_);
+ const int s_offset = offsetof(dump_t, s_);
+ const int sp_offset = offsetof(dump_t, sp_);
+ const int wsp_offset = offsetof(dump_t, wsp_);
+ const int flags_offset = offsetof(dump_t, flags_);
+
+ __ Push(xzr, dump_base, dump, tmp);
+
+ // Load the address where we will dump the state.
+ __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
+
+ // Dump the stack pointer (csp and wcsp).
+ // The stack pointer cannot be stored directly; it needs to be moved into
+ // another register first. Also, we pushed four X registers, so we need to
+ // compensate here.
+ __ Add(tmp, csp, 4 * kXRegSize);
+ __ Str(tmp, MemOperand(dump_base, sp_offset));
+ __ Add(tmp_w, wcsp, 4 * kXRegSize);
+ __ Str(tmp_w, MemOperand(dump_base, wsp_offset));
+
+ // Dump X registers.
+ __ Add(dump, dump_base, x_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
+ MemOperand(dump, i * kXRegSize));
+ }
+
+ // Dump W registers.
+ __ Add(dump, dump_base, w_offset);
+ for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
+ __ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
+ MemOperand(dump, i * kWRegSize));
+ }
+
+ // Dump D registers.
+ __ Add(dump, dump_base, d_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
+ MemOperand(dump, i * kDRegSize));
+ }
+
+ // Dump S registers.
+ __ Add(dump, dump_base, s_offset);
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
+ __ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
+ MemOperand(dump, i * kSRegSize));
+ }
+
+ // Dump the flags.
+ __ Mrs(tmp, NZCV);
+ __ Str(tmp, MemOperand(dump_base, flags_offset));
+
+  // To dump the values that were in tmp and dump, we need a new scratch
+ // register. We can use any of the already dumped registers since we can
+ // easily restore them.
+ Register dump2_base = x10;
+ Register dump2 = x11;
+ ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+
+ // Don't lose the dump_ address.
+ __ Mov(dump2_base, dump_base);
+
+ __ Pop(tmp, dump, dump_base, xzr);
+
+ __ Add(dump2, dump2_base, w_offset);
+ __ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSize));
+ __ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSize));
+ __ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSize));
+
+ __ Add(dump2, dump2_base, x_offset);
+ __ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSize));
+ __ Str(dump, MemOperand(dump2, dump.code() * kXRegSize));
+ __ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSize));
+
+ // Finally, restore dump2_base and dump2.
+ __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSize));
+ __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSize));
+
+ // Restore the MacroAssembler's scratch registers.
+ masm->TmpList()->set_list(old_tmp_list);
+ masm->FPTmpList()->set_list(old_fptmp_list);
+
+ completed_ = true;
+}
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
new file mode 100644
index 000000000..2ff26e49c
--- /dev/null
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
+#define V8_ARM64_TEST_UTILS_ARM64_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "arm64/macro-assembler-arm64.h"
+#include "arm64/utils-arm64.h"
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+
+// RegisterDump: Object allowing integer, floating point and flags registers
+// to be saved to itself for future reference.
+class RegisterDump {
+ public:
+ RegisterDump() : completed_(false) {}
+
+ // The Dump method generates code to store a snapshot of the register values.
+ // It needs to be able to use the stack temporarily, and requires that the
+ // current stack pointer is csp, and is properly aligned.
+ //
+  // The dumping code is generated through the given MacroAssembler. No registers
+ // are corrupted in the process, but the stack is used briefly. The flags will
+ // be corrupted during this call.
+ void Dump(MacroAssembler* assm);
+
+ // Register accessors.
+ inline int32_t wreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return wspreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.w_[code];
+ }
+
+ inline int64_t xreg(unsigned code) const {
+ if (code == kSPRegInternalCode) {
+ return spreg();
+ }
+ ASSERT(RegAliasesMatch(code));
+ return dump_.x_[code];
+ }
+
+ // FPRegister accessors.
+ inline uint32_t sreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.s_[code];
+ }
+
+ inline float sreg(unsigned code) const {
+ return rawbits_to_float(sreg_bits(code));
+ }
+
+ inline uint64_t dreg_bits(unsigned code) const {
+ ASSERT(FPRegAliasesMatch(code));
+ return dump_.d_[code];
+ }
+
+ inline double dreg(unsigned code) const {
+ return rawbits_to_double(dreg_bits(code));
+ }
+
+ // Stack pointer accessors.
+ inline int64_t spreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.sp_;
+ }
+
+ inline int64_t wspreg() const {
+ ASSERT(SPRegAliasesMatch());
+ return dump_.wsp_;
+ }
+
+ // Flags accessors.
+ inline uint64_t flags_nzcv() const {
+ ASSERT(IsComplete());
+ ASSERT((dump_.flags_ & ~Flags_mask) == 0);
+ return dump_.flags_ & Flags_mask;
+ }
+
+ inline bool IsComplete() const {
+ return completed_;
+ }
+
+ private:
+ // Indicate whether the dump operation has been completed.
+ bool completed_;
+
+ // Check that the lower 32 bits of x<code> exactly match the 32 bits of
+ // w<code>. A failure of this test most likely represents a failure in the
+ // ::Dump method, or a failure in the simulator.
+ bool RegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfRegisters);
+ return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
+ }
+
+ // As RegAliasesMatch, but for the stack pointer.
+ bool SPRegAliasesMatch() const {
+ ASSERT(IsComplete());
+ return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
+ }
+
+ // As RegAliasesMatch, but for floating-point registers.
+ bool FPRegAliasesMatch(unsigned code) const {
+ ASSERT(IsComplete());
+ ASSERT(code < kNumberOfFPRegisters);
+ return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
+ }
+
+ // Store all the dumped elements in a simple struct so the implementation can
+ // use offsetof to quickly find the correct field.
+ struct dump_t {
+ // Core registers.
+ uint64_t x_[kNumberOfRegisters];
+ uint32_t w_[kNumberOfRegisters];
+
+ // Floating-point registers, as raw bits.
+ uint64_t d_[kNumberOfFPRegisters];
+ uint32_t s_[kNumberOfFPRegisters];
+
+ // The stack pointer.
+ uint64_t sp_;
+ uint64_t wsp_;
+
+ // NZCV flags, stored in bits 28 to 31.
+ // bit[31] : Negative
+ // bit[30] : Zero
+ // bit[29] : Carry
+ // bit[28] : oVerflow
+ uint64_t flags_;
+ } dump_;
+
+ static dump_t for_sizeof();
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kDRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kSRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().d_[0]) == kXRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().s_[0]) == kWRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().x_[0]) == kXRegSize);
+ STATIC_ASSERT(sizeof(for_sizeof().w_[0]) == kWRegSize);
+};
+
+// Some of these methods don't use the RegisterDump argument, but they have to
+// accept them so that they can overload those that take register arguments.
+bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
+bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result);
+
+bool EqualFP32(float expected, const RegisterDump*, float result);
+bool EqualFP64(double expected, const RegisterDump*, double result);
+
+bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
+bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg);
+
+bool EqualFP32(float expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+bool EqualFP64(double expected, const RegisterDump* core,
+ const FPRegister& fpreg);
+
+bool Equal64(const Register& reg0, const RegisterDump* core,
+ const Register& reg1);
+
+bool EqualNzcv(uint32_t expected, uint32_t result);
+
+bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+
+// Populate the w, x and r arrays with registers from the 'allowed' mask. The
+// r array will be populated with <reg_size>-sized registers.
+//
+// This allows for tests which use large, parameterized blocks of registers
+// (such as the push and pop tests), but where certain registers must be
+// avoided as they are used for other purposes.
+//
+// Any of w, x, or r can be NULL if they are not required.
+//
+// The return value is a RegList indicating which registers were allocated.
+RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
+ int reg_size, int reg_count, RegList allowed);
+
+// As PopulateRegisterArray, but for floating-point registers.
+RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
+ int reg_size, int reg_count, RegList allowed);
+
+// Overwrite the contents of the specified registers. This enables tests to
+// check that register contents are written in cases where it's likely that the
+// correct outcome could already be stored in the register.
+//
+// This always overwrites X-sized registers. If tests are operating on W
+// registers, a subsequent write into an aliased W register should clear the
+// top word anyway, so clobbering the full X registers should make tests more
+// rigorous.
+void Clobber(MacroAssembler* masm, RegList reg_list,
+ uint64_t const value = 0xfedcba9876543210UL);
+
+// As Clobber, but for FP registers.
+void ClobberFP(MacroAssembler* masm, RegList reg_list,
+ double const value = kFP64SignallingNaN);
+
+// As Clobber, but for a CPURegList with either FP or integer registers. When
+// using this method, the clobber value is always the default for the basic
+// Clobber or ClobberFP functions.
+void Clobber(MacroAssembler* masm, CPURegList reg_list);
+
+#endif // V8_ARM64_TEST_UTILS_ARM64_H_
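A minimal sketch of how the comparison helpers declared above compose, assuming a RegisterDump named core has already been filled in by the surrounding cctest; only signatures declared in this header are used, and the values are placeholders rather than anything taken from the patch:

    // The raw-value Equal* overloads ignore the RegisterDump argument (see the
    // comment above), so constants can be checked directly.
    static bool CheckExpectedValues(const RegisterDump* core) {
      bool ok = true;
      ok = ok && Equal32(0xdeadbeef, core, 0xdeadbeef);                      // 32-bit raw bits.
      ok = ok && Equal64(0x0123456789abcdefUL, core, 0x0123456789abcdefUL);  // 64-bit raw bits.
      ok = ok && EqualFP64(1.5, core, 1.5);                                  // double payload.
      ok = ok && EqualNzcv(0, 0);                                            // NZCV bits 28-31.
      return ok;
    }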
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 4ab5ab5b7..bd93450a9 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -38,8 +38,12 @@ class CcTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(CcTestSuite, self).__init__(name, root)
+ if utils.IsWindows():
+ build_dir = "build"
+ else:
+ build_dir = "out"
self.serdes_dir = os.path.normpath(
- os.path.join(root, "..", "..", "out", ".serdes"))
+ os.path.join(root, "..", "..", build_dir, ".serdes"))
if os.path.exists(self.serdes_dir):
shutil.rmtree(self.serdes_dir, True)
os.makedirs(self.serdes_dir)
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index fc3c66b9c..4ecbf325a 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -25,9 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The following tests use getDefaultTimeZone().
[
[ALWAYS, {
- # The following tests use getDefaultTimeZone().
'date-format/resolved-options': [FAIL],
'date-format/timezone': [FAIL],
'general/v8Intl-exists': [FAIL],
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 234bf0f35..00f6e3472 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -25,9 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# All tests in the bug directory are expected to fail.
[
[ALWAYS, {
- # All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
}], # ALWAYS
]
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index e4f3f5587..b472f9cfb 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -73,7 +73,7 @@ class MessageTestSuite(testsuite.TestSuite):
return f.read()
def _IgnoreLine(self, string):
- """Ignore empty lines, valgrind output and Android output."""
+ """Ignore empty lines, valgrind output, Android output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID") or
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index cd086d350..35b60ee26 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -232,14 +232,13 @@ if (support_smi_only_arrays) {
obj = newarraycase_length_smidouble(2);
assertKind(elements_kind.fast_double, obj);
- // Try to continue the transition to fast object. This won't work for
- // constructed arrays because constructor dispatch is done on the
- // elements kind, and a DOUBLE array constructor won't create an allocation
- // memento.
+ // Try to continue the transition to fast object.
+ // TODO(mvstanton): re-enable commented out code when
+ // FLAG_pretenuring_call_new is turned on in the build.
obj = newarraycase_length_smidouble("coates");
assertKind(elements_kind.fast, obj);
obj = newarraycase_length_smidouble(2);
- assertKind(elements_kind.fast_double, obj);
+ // assertKind(elements_kind.fast, obj);
function newarraycase_length_smiobj(value) {
var a = new Array(3);
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index 7cd421bd1..45d5c58c7 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -82,8 +82,9 @@ function assertKind(expected, obj, name_opt) {
if (support_smi_only_arrays) {
- // Test: If a call site goes megamorphic, it loses the ability to
- // use allocation site feedback.
+ // Test: If a call site goes megamorphic, it retains the ability to
+ // use allocation site feedback (if FLAG_allocation_site_pretenuring
+ // is on).
(function() {
function bar(t, len) {
return new t(len);
@@ -95,10 +96,9 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_double, b);
c = bar(Object, 3);
b = bar(Array, 10);
- assertKind(elements_kind.fast_smi_only, b);
- b[0] = 3.5;
- c = bar(Array, 10);
- assertKind(elements_kind.fast_smi_only, c);
+ // TODO(mvstanton): re-enable when FLAG_allocation_site_pretenuring
+ // is on in the build.
+ // assertKind(elements_kind.fast_double, b);
})();
@@ -123,13 +123,16 @@ if (support_smi_only_arrays) {
bar0(Array);
%OptimizeFunctionOnNextCall(bar0);
b = bar0(Array);
- // We also lost our ability to record kind feedback, as the site
- // is megamorphic now.
- assertKind(elements_kind.fast_smi_only, b);
- assertOptimized(bar0);
- b[0] = 3.5;
- c = bar0(Array);
- assertKind(elements_kind.fast_smi_only, c);
+ // This only makes sense to test if we allow crankshafting
+ if (4 != %GetOptimizationStatus(bar0)) {
+ // We also lost our ability to record kind feedback, as the site
+ // is megamorphic now.
+ assertKind(elements_kind.fast_smi_only, b);
+ assertOptimized(bar0);
+ b[0] = 3.5;
+ c = bar0(Array);
+ assertKind(elements_kind.fast_smi_only, c);
+ }
})();
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index 429f34808..429f34808 100755..100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
diff --git a/deps/v8/test/mjsunit/assert-opt-and-deopt.js b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
index d653bb590..d0caafa27 100644
--- a/deps/v8/test/mjsunit/assert-opt-and-deopt.js
+++ b/deps/v8/test/mjsunit/assert-opt-and-deopt.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --noconcurrent-recompilation
+// Flags: --allow-natives-syntax
+// Flags: --noconcurrent-recompilation --noconcurrent-osr
if (%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is turned on after all. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/compiler/compare_map_elim.js b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
index 288d4811a..288d4811a 100644
--- a/deps/v8/test/mjsunit/compiler/compare_map_elim.js
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim.js
diff --git a/deps/v8/test/mjsunit/compiler/compare-map-elim2.js b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
new file mode 100644
index 000000000..0c0540cca
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/compare-map-elim2.js
@@ -0,0 +1,130 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --check-elimination
+
+
+function test_empty() {
+ function foo(o) {
+ return { value: o.value };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.__defineGetter__("value", function() { return 1; });
+
+ var a = new Base();
+ a.a = 1;
+ foo(a);
+
+ Base.prototype.__defineGetter__("value", function() { return this.v_; });
+
+ var b = new Base();
+ b.b = 1;
+ foo(b);
+
+ var d = new Base();
+ d.d = 1;
+ d.value;
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+
+function test_narrow1() {
+ function foo(o) {
+ return { value: o.value };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.__defineGetter__("value", function() { return 1; });
+
+ var a = new Base();
+ a.a = 1;
+ foo(a);
+
+ Base.prototype.__defineGetter__("value", function() { return this.v_; });
+
+ var b = new Base();
+ b.b = 1;
+ foo(b);
+
+ var c = new Base();
+ c.c = 1;
+ foo(c);
+
+ var d = new Base();
+ d.d = 1;
+ d.value;
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+
+function test_narrow2() {
+ function foo(o, flag) {
+ return { value: o.value(flag) };
+ }
+
+ function Base() {
+ this.v_ = 5;
+ }
+ Base.prototype.value = function(flag) { return flag ? this.v_ : this.v_; };
+
+
+ var a = new Base();
+ a.a = 1;
+ foo(a, false);
+ foo(a, false);
+
+ var b = new Base();
+ b.b = 1;
+ foo(b, true);
+
+ var c = new Base();
+ c.c = 1;
+ foo(c, true);
+
+ var d = new Base();
+ d.d = 1;
+ d.value(true);
+
+ %OptimizeFunctionOnNextCall(foo);
+
+ var o = foo(b);
+}
+
+test_empty();
+test_narrow1();
+test_narrow2();
diff --git a/deps/v8/test/mjsunit/compiler/compare_objeq_elim.js b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
index 4492df45c..4492df45c 100644
--- a/deps/v8/test/mjsunit/compiler/compare_objeq_elim.js
+++ b/deps/v8/test/mjsunit/compiler/compare-objeq-elim.js
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 2a20790ea..ab7d6d50e 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -43,9 +43,10 @@ function new_object() {
function add_field(obj) {
obj.c = 3;
}
-
-add_field(new_object());
-add_field(new_object());
+var obj1 = new_object();
+var obj2 = new_object();
+add_field(obj1);
+add_field(obj2);
%OptimizeFunctionOnNextCall(add_field, "concurrent");
var o = new_object();
diff --git a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js b/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js
index 56835ce5a..9f01541c9 100644
--- a/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js
+++ b/deps/v8/test/mjsunit/compiler/dead-string-char-code-at.js
@@ -31,21 +31,21 @@ var S1 = "string1";
var S2 = "@@string2";
function dead1(a, b) {
- var x = %StringCharCodeAt(a, 4);
+ var x = %_StringCharCodeAt(a, 4);
return a; // x is dead code
}
function dead2(a, b) {
- var x = %StringCharCodeAt(a, 3);
- var y = %StringCharCodeAt(b, 1);
+ var x = %_StringCharCodeAt(a, 3);
+ var y = %_StringCharCodeAt(b, 1);
return a; // x and y are both dead
}
function dead3(a, b) {
a = a ? "11" : "12";
b = b ? "13" : "14";
- var x = %StringCharCodeAt(a, 2);
- var y = %StringCharCodeAt(b, 0);
+ var x = %_StringCharCodeAt(a, 2);
+ var y = %_StringCharCodeAt(b, 0);
return a; // x and y are both dead
}
diff --git a/deps/v8/test/mjsunit/compiler/division-by-constant.js b/deps/v8/test/mjsunit/compiler/division-by-constant.js
new file mode 100644
index 000000000..0778e95b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/division-by-constant.js
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --no-use-inlining
+
+// -----------------------------------------------------------------------------
+
+function ConstructDiv(divisor) {
+ return "return ((dividend | 0) / ((" + divisor + ") | 0)) | 0";
+}
+
+var RefDivByConstI =
+ new Function("dividend", "divisor", ConstructDiv("divisor"));
+
+%NeverOptimizeFunction(RefDivByConstI);
+
+// -----------------------------------------------------------------------------
+
+function ConstructMod(divisor) {
+ return "return ((dividend | 0) % ((" + divisor + ") | 0)) | 0";
+}
+
+var RefModByConstI =
+ new Function("dividend", "divisor", ConstructMod("divisor"));
+
+%NeverOptimizeFunction(RefModByConstI);
+
+// -----------------------------------------------------------------------------
+
+function ConstructFlooringDiv(divisor) {
+ return "return Math.floor(dividend / (" + divisor + ")) | 0";
+}
+
+var RefFlooringDivByConstI =
+ new Function("dividend", "divisor", ConstructFlooringDiv("divisor"));
+
+%NeverOptimizeFunction(RefFlooringDivByConstI);
+
+// -----------------------------------------------------------------------------
+
+function PushSymmetric(values, x) {
+ values.push(x, -x);
+}
+
+function PushRangeSymmetric(values, from, to) {
+ for (var x = from; x <= to; x++) {
+ PushSymmetric(values, x);
+ }
+}
+
+function CreateTestValues() {
+ var values = [
+ // -(2^31)
+ -2147483648,
+ // Some values from "Hacker's Delight", chapter 10-7.
+ 715827883, 1431655766, -1431655765, -1431655764,
+ // Some "randomly" chosen numbers.
+ 123, -1234, 12345, -123456, 1234567, -12345678, 123456789
+ ];
+ // Powers of 2
+ for (var shift = 6; shift < 31; shift++) {
+ PushSymmetric(values, 1 << shift);
+ }
+ // Values near zero
+ PushRangeSymmetric(values, 1, 32);
+ // Various magnitudes
+ PushRangeSymmetric(values, 100, 109);
+ PushRangeSymmetric(values, 1000, 1009);
+ PushRangeSymmetric(values, 10000, 10009);
+ PushRangeSymmetric(values, 100000, 100009);
+ PushRangeSymmetric(values, 1000000, 1000009);
+ PushRangeSymmetric(values, 10000000, 10000009);
+ PushRangeSymmetric(values, 100000000, 100000009);
+ PushRangeSymmetric(values, 1000000000, 1000000009);
+ return values;
+}
+
+// -----------------------------------------------------------------------------
+
+function TestDivisionLike(ref, construct, values, divisor) {
+ // Define the function to test.
+ var OptFun = new Function("dividend", construct(divisor));
+
+ // Warm up type feedback.
+ OptFun(7);
+ OptFun(11);
+ %OptimizeFunctionOnNextCall(OptFun);
+ OptFun(13);
+
+ // Check results.
+ values.forEach(function(dividend) {
+    // Avoid deopt caused by overflow; we do not want to test this here.
+ if (dividend === -2147483648 && divisor === -1) return;
+ assertEquals(ref(dividend, divisor), OptFun(dividend));
+ });
+}
+
+function Test(ref, construct) {
+ var values = CreateTestValues();
+ values.forEach(function(divisor) {
+ TestDivisionLike(ref, construct, values, divisor);
+ });
+}
+
+Test(RefDivByConstI, ConstructDiv);
+Test(RefModByConstI, ConstructMod);
+Test(RefFlooringDivByConstI, ConstructFlooringDiv);
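One worked detail behind the overflow guard in TestDivisionLike above: in 32-bit two's complement arithmetic

    \[ \frac{-2147483648}{-1} \;=\; 2^{31} \;=\; 2147483648 \;>\; 2^{31} - 1 = \mathrm{INT32\_MAX}, \]

so that single dividend/divisor pair has no representable int32 result and is skipped rather than asserted.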
diff --git a/deps/v8/test/mjsunit/compiler/smi-stores-opt.js b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
new file mode 100644
index 000000000..ca0923abc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/smi-stores-opt.js
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var o = {a:1.5};
+o.a = 0;
+var a = o.a;
+
+function g() {
+ return 1;
+}
+
+var o2 = {a:{}};
+
+function f() {
+ var result = {a: a};
+ var literal = {x:g()};
+ return [result, literal];
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f()[1].x);
diff --git a/deps/v8/test/mjsunit/compiler/store-elimination.js b/deps/v8/test/mjsunit/compiler/store-elimination.js
new file mode 100644
index 000000000..1806ed963
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/store-elimination.js
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --store-elimination
+
+// Test local elimination of unobservable stores.
+
+function B(x, y) {
+ this.x = x;
+ this.y = y;
+ return this;
+}
+
+function test_store_store() {
+ var a = new B(1, 2);
+ a.x = 3; // eliminatable.
+ a.x = 4;
+ return a.x;
+}
+
+function test_store_load_store1() {
+ var a = new B(6, 7);
+ a.x = 3; // eliminatable.
+ var r = a.y;
+ a.x = 4;
+ return r;
+}
+
+function test_store_load_store2() {
+ var a = new B(6, 8);
+ a.x = 3; // not eliminatable, unless next load is eliminated.
+ var r = a.x;
+ a.x = 4;
+ return r;
+}
+
+function test_store_call_store() {
+ var a = new B(2, 9);
+ a.x = 3; // not eliminatable.
+ killall();
+ a.x = 4;
+ return a.y;
+}
+
+function test_store_deopt_store() {
+ var a = new B(2, 1);
+ a.x = 3; // not eliminatable (implicit ValueOf following)
+ var c = a + 2;
+ a.x = 4;
+ return a.y;
+}
+
+function killall() {
+ try { } catch(e) { }
+}
+
+%NeverOptimizeFunction(killall);
+
+function test(x, f) {
+ assertEquals(x, f());
+ assertEquals(x, f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(x, f());
+}
+
+test(4, test_store_store);
+test(7, test_store_load_store1);
+test(3, test_store_load_store2);
+test(9, test_store_call_store);
+test(1, test_store_deopt_store);
diff --git a/deps/v8/test/mjsunit/compiler/to-fast-properties.js b/deps/v8/test/mjsunit/compiler/to-fast-properties.js
new file mode 100644
index 000000000..26829d95e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/to-fast-properties.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test requires OSR or --stress-runs=3 to optimize the top level script.
+
+for (var i = 0; i < 3; i++) {
+  // HToFastProperties is used for top-level object literals that have a
+ // function property.
+ var obj = {
+ index: function() { return i; },
+ x: 0
+ }
+ var n = 10000;
+ // Loop to hit OSR.
+ for (var j = 0; j < n; j++) {
+ obj.x += i;
+ }
+ assertEquals(obj.index() * n, obj.x);
+}
diff --git a/deps/v8/test/mjsunit/constant-fold-control-instructions.js b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
new file mode 100644
index 000000000..eb1b0f3c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
@@ -0,0 +1,47 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --fold-constants
+
+function test() {
+ assertEquals("string", typeof "");
+ assertEquals("number", typeof 1.1);
+ assertEquals("number", typeof 1);
+ assertEquals("boolean", typeof true);
+ assertEquals("function", typeof function() {});
+ assertEquals("object", typeof null);
+ assertEquals("object", typeof {});
+
+ assertTrue(%_IsObject({}));
+ assertTrue(%_IsObject(null));
+ assertTrue(%_IsObject(/regex/));
+ assertFalse(%_IsObject(0));
+ assertFalse(%_IsObject(""));
+
+ assertTrue(%_IsSmi(1));
+ assertFalse(%_IsSmi(1.1));
+ assertFalse(%_IsSmi({}));
+
+ assertTrue(%_IsRegExp(/regexp/));
+ assertFalse(%_IsRegExp({}));
+
+ assertTrue(%_IsArray([1]));
+ assertFalse(%_IsArray(function() {}));
+
+ assertTrue(%_IsFunction(function() {}));
+ assertFalse(%_IsFunction(null));
+
+ assertTrue(%_IsSpecObject(new Date()));
+ assertFalse(%_IsSpecObject(1));
+
+ assertTrue(%_IsMinusZero(-0.0));
+ assertFalse(%_IsMinusZero(1));
+ assertFalse(%_IsMinusZero(""));
+}
+
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index 942bd2bb0..f5b5ec913 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -71,18 +71,42 @@ function BeginTest(name) {
// Check result of a test.
function EndTest() {
assertTrue(listener_called, "listerner not called for " + test_name);
- assertNull(exception, test_name);
+ assertNull(exception, test_name + " / " + exception);
end_test_count++;
}
+// Check that two scopes are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+ assertEquals(scope1.scopeType(), scope2.scopeType());
+ assertEquals(scope1.frameIndex(), scope2.frameIndex());
+ assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+ assertPropertiesEqual(scope1.scopeObject().value(), scope2.scopeObject().value());
+}
+
+function CheckFastAllScopes(scopes, exec_state)
+{
+ var fast_all_scopes = exec_state.frame().allScopes(true);
+ var length = fast_all_scopes.length;
+ assertTrue(scopes.length >= length);
+ for (var i = 0; i < scopes.length && i < length; i++) {
+ var scope = fast_all_scopes[length - i - 1];
+ assertTrue(scope.isScope());
+ assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
+ }
+}
+
+
// Check that the scope chain contains the expected types of scopes.
function CheckScopeChain(scopes, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
assertEquals(scopes.length, exec_state.frame().scopeCount());
+ assertEquals(scopes.length, all_scopes.length, "FrameMirror.allScopes length");
for (var i = 0; i < scopes.length; i++) {
var scope = exec_state.frame().scope(i);
assertTrue(scope.isScope());
assertEquals(scopes[i], scope.scopeType());
+ assertScopeMirrorEquals(all_scopes[i], scope);
// Check the global object when hitting the global scope.
if (scopes[i] == debug.ScopeType.Global) {
@@ -91,6 +115,7 @@ function CheckScopeChain(scopes, exec_state) {
assertPropertiesEqual(this, scope.scopeObject().value());
}
}
+ CheckFastAllScopes(scopes, exec_state);
// Get the debug command processor.
var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 1cbdb376c..80d423e10 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -59,7 +59,7 @@ for (i = 0; i < scripts.length; i++) {
}
// This has to be updated if the number of native scripts change.
-assertTrue(named_native_count == 16 || named_native_count == 17);
+assertTrue(named_native_count == 19 || named_native_count == 20);
// Only the 'gc' extension is loaded.
assertEquals(1, extension_count);
// This script and mjsunit.js has been loaded. If using d8, d8 loads
diff --git a/deps/v8/test/mjsunit/dehoisted-array-index.js b/deps/v8/test/mjsunit/dehoisted-array-index.js
new file mode 100644
index 000000000..f4a32c103
--- /dev/null
+++ b/deps/v8/test/mjsunit/dehoisted-array-index.js
@@ -0,0 +1,163 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+// Key is HParameter
+function aoo(i) {
+ return a[i + 1];
+}
+
+aoo(1);
+aoo(-1);
+%OptimizeFunctionOnNextCall(aoo);
+aoo(-1);
+
+
+// Key is HChange, used by either dehoisted or non-dehoisted
+function boo(i) {
+ var ret = 0;
+ if (i < 0) {
+ ret = a[i + 10];
+ } else {
+ ret = a[i];
+ }
+ return ret;
+}
+
+boo(1);
+boo(-1);
+%OptimizeFunctionOnNextCall(boo);
+boo(-1);
+
+
+// Key is HMul(-i ==> i * (-1))
+function coo() {
+ var ret = 0;
+ for (var i = 4; i > 0; i -= 1) {
+ ret += a[-i + 4]; // dehoisted
+ }
+
+ return ret;
+}
+
+coo();
+coo();
+%OptimizeFunctionOnNextCall(coo);
+coo();
+
+
+// Key is HPhi, used only by dehoisted
+function doo() {
+ var ret = 0;
+ for (var i = 0; i < 5; i += 1) {
+ ret += a[i + 1]; // dehoisted
+ }
+ return ret;
+}
+doo();
+doo();
+%OptimizeFunctionOnNextCall(doo);
+doo();
+
+// Key is HPhi, but used by both dehoisted and non-dehoisted accesses;
+// the sign extend is useless.
+function eoo() {
+ var ret = 0;
+ for (var i = 0; i < 5; i += 1) {
+ ret += a[i]; // non-dehoisted
+ ret += a[i + 1]; // dehoisted
+ }
+
+ return ret;
+}
+eoo();
+eoo();
+%OptimizeFunctionOnNextCall(eoo);
+eoo();
+
+
+
+// Key is HPhi, but used by either dehoisted or non-dehoisted
+function foo() {
+ var ret = 0;
+ for (var i = -3; i < 4; i += 1) {
+ if (i < 0) {
+ ret += a[i + 4]; // dehoisted
+ } else {
+ ret += a[i]; // non-dehoisted
+ }
+ }
+
+ return ret;
+}
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+// Key is HPhi, but not induction variable
+function goo(i) {
+ if (i > 0) {
+ i += 1;
+ } else {
+ i += -1;
+ }
+
+ return a[i + 3];
+}
+goo(-1);
+goo(-1);
+%OptimizeFunctionOnNextCall(goo);
+goo(-1);
+
+// Key is return value of function
+function index() {
+ return 1;
+}
+%NeverOptimizeFunction(index);
+function hoo() {
+ return a[index() + 3];
+}
+
+hoo();
+hoo();
+%OptimizeFunctionOnNextCall(hoo);
+hoo();
+
+// Sign extension of key makes AssertZeroExtended fail in DoBoundsCheck
+function ioo(i) {
+ return a[i] + a[i + 1];
+}
+
+ioo(1);
+ioo(1);
+%OptimizeFunctionOnNextCall(ioo);
+ioo(-1);
diff --git a/deps/v8/test/mjsunit/deopt-with-fp-regs.js b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
new file mode 100644
index 000000000..10e3d9abb
--- /dev/null
+++ b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+deopt_trigger = 0;
+side_effect = 0;
+
+function test(a, b, c, d, e, v) {
+ // This test expects some specific input values.
+ assertEquals(10.0, a);
+ assertEquals(20.0, b);
+ assertEquals(30.0, c);
+ assertEquals(40.0, d);
+ assertEquals(50.0, e);
+ assertEquals(1.5, v);
+
+ // Perform a few double calculations.
+ a = a * 0.1;
+ b = b * 0.2;
+ c = c * 0.3;
+ d = d * 0.4;
+ e = e * 0.5;
+
+ // Write to a field of a global object. As for any side effect, a HSimulate
+ // will be introduced after the instructions to support this. If we deopt
+ // later in this function, the execution will resume in full-codegen after
+ // this point.
+ side_effect++;
+ // The following field of the global object will be deleted to force a deopt.
+  // If we use type feedback to deopt, then tests run with --stress-opt will
+  // not deopt after a few iterations.
+ // If we use %DeoptimizeFunction, all values will be on the frame due to the
+ // call and we will not exercise the translation mechanism handling fp
+ // registers.
+ deopt_trigger = v;
+
+ // Do a few more calculations using the previous values after our deopt point
+ // so the floating point registers which hold those values are recorded in the
+ // environment and will be used during deoptimization.
+ a = a * v;
+ b = b * v;
+ c = c * v;
+ d = d * v;
+ e = e * v;
+
+ // Check that we got the expected results.
+ assertEquals(1.5, a);
+ assertEquals(6, b);
+ assertEquals(13.5, c);
+ assertEquals(24, d);
+ assertEquals(37.5, e);
+}
+
+
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+%OptimizeFunctionOnNextCall(test);
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(2 != %GetOptimizationStatus(test));
+
+// By deleting the field we are forcing the code to deopt when the field is
+// read on next execution.
+delete deopt_trigger;
+test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
+assertTrue(1 != %GetOptimizationStatus(test));
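The constants asserted at the end of test follow from the two multiplication passes around the deopt point:

    \[ a = 10 \cdot 0.1 \cdot 1.5 = 1.5,\quad b = 20 \cdot 0.2 \cdot 1.5 = 6,\quad c = 30 \cdot 0.3 \cdot 1.5 = 13.5,\quad d = 40 \cdot 0.4 \cdot 1.5 = 24,\quad e = 50 \cdot 0.5 \cdot 1.5 = 37.5, \]

which is what the final assertEquals block checks after deoptimization restores the floating-point register values.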
diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js
index c3144955c..08cee8cdd 100644
--- a/deps/v8/test/mjsunit/div-mod.js
+++ b/deps/v8/test/mjsunit/div-mod.js
@@ -126,9 +126,15 @@ function compute_mod(dividend, divisor) {
var example_numbers = [
NaN,
0,
+
+ // Due to a bug in fmod(), modulos involving denormals
+ // return the wrong result for glibc <= 2.16.
+ // Details: http://sourceware.org/bugzilla/show_bug.cgi?id=14048
+
Number.MIN_VALUE,
3 * Number.MIN_VALUE,
max_denormal,
+
min_normal,
repeating_decimal,
finite_decimal,
diff --git a/deps/v8/test/mjsunit/double-intrinsics.js b/deps/v8/test/mjsunit/double-intrinsics.js
new file mode 100644
index 000000000..16d653893
--- /dev/null
+++ b/deps/v8/test/mjsunit/double-intrinsics.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assertDoubleBits(hi, lo, x) {
+ hi = hi | 0;
+ lo = lo | 0;
+ assertEquals(x, %_ConstructDouble(hi, lo));
+ assertEquals(hi, %_DoubleHi(x));
+ assertEquals(lo, %_DoubleLo(x));
+ assertEquals(x, %_ConstructDouble(%_DoubleHi(x), %_DoubleLo(x)));
+}
+
+
+var tests = [0x7ff00000, 0x00000000, Infinity,
+ 0xfff00000, 0x00000000, -Infinity,
+ 0x80000000, 0x00000000, -0,
+ 0x400921fb, 0x54442d18, Math.PI,
+ 0xc00921fb, 0x54442d18, -Math.PI,
+ 0x4005bf0a, 0x8b145769, Math.E,
+ 0xc005bf0a, 0x8b145769, -Math.E,
+ 0xbfe80000, 0x00000000, -0.75];
+
+
+for (var i = 0; i < tests.length; i += 3) {
+ assertDoubleBits(tests[i], tests[i + 1], tests[i + 2]);
+}
+
+%OptimizeFunctionOnNextCall(assertDoubleBits);
+
+for (var i = 0; i < tests.length; i += 3) {
+ assertDoubleBits(tests[i], tests[i + 1], tests[i + 2]);
+ assertOptimized(assertDoubleBits);
+}
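For reference, each triple in the tests table above is just the IEEE 754 binary64 bit pattern of the value split into its high and low 32-bit words, which is what %_DoubleHi and %_DoubleLo expose. For Math.PI:

    \[ \pi \;=\; (-1)^{0} \times 2^{\,0\mathrm{x}400 - 1023} \times 1.\mathtt{921FB54442D18}_{16}, \]

giving the packed bits 0x400921FB54442D18, i.e. hi = 0x400921fb and lo = 0x54442d18 as listed.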
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index e2bbc31a4..3aa513a37 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --nostress-opt
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --nostress-opt --typed-array-max-size-in-heap=2048
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -43,19 +43,28 @@ if (support_smi_only_arrays) {
}
var elements_kind = {
- fast_smi_only : 'fast smi only elements',
- fast : 'fast elements',
- fast_double : 'fast double elements',
- dictionary : 'dictionary elements',
- external_byte : 'external byte elements',
- external_unsigned_byte : 'external unsigned byte elements',
- external_short : 'external short elements',
- external_unsigned_short : 'external unsigned short elements',
- external_int : 'external int elements',
- external_unsigned_int : 'external unsigned int elements',
- external_float : 'external float elements',
- external_double : 'external double elements',
- external_pixel : 'external pixel elements'
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ external_int8 : 'external int8 elements',
+ external_uint8 : 'external uint8 elements',
+ external_int16 : 'external int16 elements',
+ external_uint16 : 'external uint16 elements',
+ external_int32 : 'external int32 elements',
+ external_uint32 : 'external uint32 elements',
+ external_float32 : 'external float32 elements',
+ external_float64 : 'external float64 elements',
+ external_uint8_clamped : 'external uint8_clamped elements',
+ fixed_int8 : 'fixed int8 elements',
+ fixed_uint8 : 'fixed uint8 elements',
+ fixed_int16 : 'fixed int16 elements',
+ fixed_uint16 : 'fixed uint16 elements',
+ fixed_int32 : 'fixed int32 elements',
+ fixed_uint32 : 'fixed uint32 elements',
+ fixed_float32 : 'fixed float32 elements',
+ fixed_float64 : 'fixed float64 elements',
+ fixed_uint8_clamped : 'fixed uint8_clamped elements'
}
function getKind(obj) {
@@ -63,34 +72,61 @@ function getKind(obj) {
if (%HasFastObjectElements(obj)) return elements_kind.fast;
if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+
// Every external kind is also an external array.
- assertTrue(%HasExternalArrayElements(obj));
if (%HasExternalInt8Elements(obj)) {
- return elements_kind.external_byte;
+ return elements_kind.external_int8;
}
if (%HasExternalUint8Elements(obj)) {
- return elements_kind.external_unsigned_byte;
+ return elements_kind.external_uint8;
}
if (%HasExternalInt16Elements(obj)) {
- return elements_kind.external_short;
+ return elements_kind.external_int16;
}
if (%HasExternalUint16Elements(obj)) {
- return elements_kind.external_unsigned_short;
+ return elements_kind.external_uint16;
}
if (%HasExternalInt32Elements(obj)) {
- return elements_kind.external_int;
+ return elements_kind.external_int32;
}
if (%HasExternalUint32Elements(obj)) {
- return elements_kind.external_unsigned_int;
+ return elements_kind.external_uint32;
}
if (%HasExternalFloat32Elements(obj)) {
- return elements_kind.external_float;
+ return elements_kind.external_float32;
}
if (%HasExternalFloat64Elements(obj)) {
- return elements_kind.external_double;
+ return elements_kind.external_float64;
}
if (%HasExternalUint8ClampedElements(obj)) {
- return elements_kind.external_pixel;
+ return elements_kind.external_uint8_clamped;
+ }
+ if (%HasFixedInt8Elements(obj)) {
+ return elements_kind.fixed_int8;
+ }
+ if (%HasFixedUint8Elements(obj)) {
+ return elements_kind.fixed_uint8;
+ }
+ if (%HasFixedInt16Elements(obj)) {
+ return elements_kind.fixed_int16;
+ }
+ if (%HasFixedUint16Elements(obj)) {
+ return elements_kind.fixed_uint16;
+ }
+ if (%HasFixedInt32Elements(obj)) {
+ return elements_kind.fixed_int32;
+ }
+ if (%HasFixedUint32Elements(obj)) {
+ return elements_kind.fixed_uint32;
+ }
+ if (%HasFixedFloat32Elements(obj)) {
+ return elements_kind.fixed_float32;
+ }
+ if (%HasFixedFloat64Elements(obj)) {
+ return elements_kind.fixed_float64;
+ }
+ if (%HasFixedUint8ClampedElements(obj)) {
+ return elements_kind.fixed_uint8_clamped;
}
}
@@ -136,15 +172,26 @@ function test_wrapper() {
for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
assertKind(elements_kind.fast_double, fast_double_array);
- assertKind(elements_kind.external_byte, new Int8Array(9001));
- assertKind(elements_kind.external_unsigned_byte, new Uint8Array(007));
- assertKind(elements_kind.external_short, new Int16Array(666));
- assertKind(elements_kind.external_unsigned_short, new Uint16Array(42));
- assertKind(elements_kind.external_int, new Int32Array(0xF));
- assertKind(elements_kind.external_unsigned_int, new Uint32Array(23));
- assertKind(elements_kind.external_float, new Float32Array(7));
- assertKind(elements_kind.external_double, new Float64Array(0));
- assertKind(elements_kind.external_pixel, new Uint8ClampedArray(512));
+ assertKind(elements_kind.fixed_int8, new Int8Array(007));
+ assertKind(elements_kind.fixed_uint8, new Uint8Array(007));
+ assertKind(elements_kind.fixed_int16, new Int16Array(666));
+ assertKind(elements_kind.fixed_uint16, new Uint16Array(42));
+ assertKind(elements_kind.fixed_int32, new Int32Array(0xF));
+ assertKind(elements_kind.fixed_uint32, new Uint32Array(23));
+ assertKind(elements_kind.fixed_float32, new Float32Array(7));
+ assertKind(elements_kind.fixed_float64, new Float64Array(0));
+ assertKind(elements_kind.fixed_uint8_clamped, new Uint8ClampedArray(512));
+
+ var ab = new ArrayBuffer(128);
+ assertKind(elements_kind.external_int8, new Int8Array(ab));
+ assertKind(elements_kind.external_uint8, new Uint8Array(ab));
+ assertKind(elements_kind.external_int16, new Int16Array(ab));
+ assertKind(elements_kind.external_uint16, new Uint16Array(ab));
+ assertKind(elements_kind.external_int32, new Int32Array(ab));
+ assertKind(elements_kind.external_uint32, new Uint32Array(ab));
+ assertKind(elements_kind.external_float32, new Float32Array(ab));
+ assertKind(elements_kind.external_float64, new Float64Array(ab));
+ assertKind(elements_kind.external_uint8_clamped, new Uint8ClampedArray(ab));
// Crankshaft support for smi-only array elements.
function monomorphic(array) {
diff --git a/deps/v8/test/mjsunit/es6/math-cbrt.js b/deps/v8/test/mjsunit/es6/math-cbrt.js
new file mode 100644
index 000000000..83d9eb5d7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/math-cbrt.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths
+
+assertTrue(isNaN(Math.cbrt(NaN)));
+assertTrue(isNaN(Math.cbrt(function() {})));
+assertTrue(isNaN(Math.cbrt({ toString: function() { return NaN; } })));
+assertTrue(isNaN(Math.cbrt({ valueOf: function() { return "abc"; } })));
+assertEquals("Infinity", String(1/Math.cbrt(0)));
+assertEquals("-Infinity", String(1/Math.cbrt(-0)));
+assertEquals("Infinity", String(Math.cbrt(Infinity)));
+assertEquals("-Infinity", String(Math.cbrt(-Infinity)));
+
+for (var i = 1E-100; i < 1E100; i *= Math.PI) {
+ assertEqualsDelta(i, Math.cbrt(i*i*i), i * 1E-15);
+}
+
+for (var i = -1E-100; i > -1E100; i *= Math.E) {
+ assertEqualsDelta(i, Math.cbrt(i*i*i), -i * 1E-15);
+}
+
+// Let's be exact at least for small integers.
+for (var i = 2; i < 10000; i++) {
+ assertEquals(i, Math.cbrt(i*i*i));
+}
diff --git a/deps/v8/test/mjsunit/es6/math-clz32.js b/deps/v8/test/mjsunit/es6/math-clz32.js
new file mode 100644
index 000000000..816f6a936
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/math-clz32.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths --allow-natives-syntax
+
+[NaN, Infinity, -Infinity, 0, -0, "abc", "Infinity", "-Infinity", {}].forEach(
+ function(x) {
+ assertEquals(32, Math.clz32(x));
+ }
+);
+
+function testclz(x) {
+ for (var i = 0; i < 33; i++) {
+ if (x & 0x80000000) return i;
+ x <<= 1;
+ }
+ return 32;
+}
+
+
+function f(e) {
+ var max = Math.pow(2, e);
+ for (var x = 0; x < max; x = x * 1.01 + 1) {
+ assertEquals(testclz(x), Math.clz32(x));
+ assertEquals(testclz(-x), Math.clz32(-x));
+ assertEquals(testclz(x), Math.clz32({ valueOf: function() { return x; } }));
+ assertEquals(testclz(-x),
+ Math.clz32({ toString: function() { return -x; } }));
+ }
+}
+
+f(5);
+f(5);
+%OptimizeFunctionOnNextCall(f);
+f(40);
diff --git a/deps/v8/test/mjsunit/es6/math-expm1.js b/deps/v8/test/mjsunit/es6/math-expm1.js
new file mode 100644
index 000000000..de915c096
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/math-expm1.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths --no-fast-math
+
+assertTrue(isNaN(Math.expm1(NaN)));
+assertTrue(isNaN(Math.expm1(function() {})));
+assertTrue(isNaN(Math.expm1({ toString: function() { return NaN; } })));
+assertTrue(isNaN(Math.expm1({ valueOf: function() { return "abc"; } })));
+assertEquals("Infinity", String(1/Math.expm1(0)));
+assertEquals("-Infinity", String(1/Math.expm1(-0)));
+assertEquals("Infinity", String(Math.expm1(Infinity)));
+assertEquals(-1, Math.expm1(-Infinity));
+
+for (var x = 0.1; x < 700; x += 0.1) {
+ var expected = Math.exp(x) - 1;
+ assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+ expected = Math.exp(-x) - 1;
+ assertEqualsDelta(expected, Math.expm1(-x), -expected * 1E-14);
+}
+
+// Values close to 0:
+// Use ten terms of the Taylor expansion at 0 for exp(x) as test expectation:
+// exp(x) - 1 == exp(0) + exp(0) * x + x * x / 2 + ... - 1
+// == x + x * x / 2 + x * x * x / 6 + ...
+function expm1(x) {
+ return x * (1 + x * (1/2 + x * (
+ 1/6 + x * (1/24 + x * (
+ 1/120 + x * (1/720 + x * (
+ 1/5040 + x * (1/40320 + x*(
+ 1/362880 + x * (1/3628800))))))))));
+}
+
+for (var x = 1E-1; x > 1E-300; x *= 0.8) {
+ var expected = expm1(x);
+ assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+}
diff --git a/deps/v8/test/mjsunit/es6/math-fround.js b/deps/v8/test/mjsunit/es6/math-fround.js
new file mode 100644
index 000000000..ea432ea2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/math-fround.js
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths
+
+// Monkey-patch Float32Array.
+Float32Array = function(x) { this[0] = 0; };
+
+assertTrue(isNaN(Math.fround(NaN)));
+assertTrue(isNaN(Math.fround(function() {})));
+assertTrue(isNaN(Math.fround({ toString: function() { return NaN; } })));
+assertTrue(isNaN(Math.fround({ valueOf: function() { return "abc"; } })));
+assertEquals("Infinity", String(1/Math.fround(0)));
+assertEquals("-Infinity", String(1/Math.fround(-0)));
+assertEquals("Infinity", String(Math.fround(Infinity)));
+assertEquals("-Infinity", String(Math.fround(-Infinity)));
+
+assertEquals("Infinity", String(Math.fround(1E200)));
+assertEquals("-Infinity", String(Math.fround(-1E200)));
+assertEquals("Infinity", String(1/Math.fround(1E-300)));
+assertEquals("-Infinity", String(1/Math.fround(-1E-300)));
+
+mantissa_23_shift = Math.pow(2, -23);
+mantissa_29_shift = Math.pow(2, -23-29);
+
+// JavaScript implementation of IEEE 754 to test double-to-single conversion.
+function ieee754float(sign_bit,
+ exponent_bits,
+ mantissa_23_bits,
+ mantissa_29_bits) {
+ this.sign_bit = sign_bit & 1;
+ this.exponent_bits = exponent_bits & ((1 << 11) - 1);
+ this.mantissa_23_bits = mantissa_23_bits & ((1 << 23) - 1);
+ this.mantissa_29_bits = mantissa_29_bits & ((1 << 29) - 1);
+}
+
+ieee754float.prototype.returnSpecial = function() {
+  var sign = this.sign_bit ? -1 : 1;
+  if (this.mantissa_23_bits == 0 && this.mantissa_29_bits == 0) return sign * Infinity;
+  return NaN;
+}
+
+ieee754float.prototype.toDouble = function() {
+ var sign = this.sign_bit ? -1 : 1;
+ var exponent = this.exponent_bits - 1023;
+  if (exponent == -1023) return this.returnSpecial();
+ var mantissa = 1 + this.mantissa_23_bits * mantissa_23_shift +
+ this.mantissa_29_bits * mantissa_29_shift;
+ return sign * Math.pow(2, exponent) * mantissa;
+}
+
+ieee754float.prototype.toSingle = function() {
+ var sign = this.sign_bit ? -1 : 1;
+ var exponent = this.exponent_bits - 1023;
+  if (exponent == -1023) return this.returnSpecial();
+ if (exponent > 127) return sign * Infinity;
+ if (exponent < -126) return this.toSingleSubnormal(sign, exponent);
+ var round = this.mantissa_29_bits >> 28;
+ var mantissa = 1 + (this.mantissa_23_bits + round) * mantissa_23_shift;
+ return sign * Math.pow(2, exponent) * mantissa;
+}
+
+ieee754float.prototype.toSingleSubnormal = function(sign, exponent) {
+ var shift = -126 - exponent;
+ if (shift > 24) return sign * 0;
+ var round_mask = 1 << (shift - 1);
+ var mantissa_23_bits = this.mantissa_23_bits + (1 << 23);
+ var round = ((mantissa_23_bits & round_mask) != 0) | 0;
+ if (round) { // Round to even if tied.
+ var tied_mask = round_mask - 1;
+ var result_last_bit_mask = 1 << shift;
+ var tied = this.mantissa_29_bits == 0 &&
+ (mantissa_23_bits & tied_mask ) == 0;
+ var result_already_even = (mantissa_23_bits & result_last_bit_mask) == 0;
+ if (tied && result_already_even) round = 0;
+ }
+ mantissa_23_bits >>= shift;
+ var mantissa = (mantissa_23_bits + round) * mantissa_23_shift;
+ return sign * Math.pow(2, -126) * mantissa;
+}
+
+
+var pi = new ieee754float(0, 0x400, 0x490fda, 0x14442d18);
+assertEquals(pi.toSingle(), Math.fround(pi.toDouble()));
+
+function fuzz_mantissa(sign, exp, m1inc, m2inc) {
+ for (var m1 = 0; m1 < (1 << 23); m1 += m1inc) {
+ for (var m2 = 0; m2 < (1 << 29); m2 += m2inc) {
+ var float = new ieee754float(sign, exp, m1, m2);
+ assertEquals(float.toSingle(), Math.fround(float.toDouble()));
+ }
+ }
+}
+
+for (var sign = 0; sign < 2; sign++) {
+ for (var exp = 1024 - 170; exp < 1024 + 170; exp++) {
+ fuzz_mantissa(sign, exp, 1337 * exp - sign, 127913 * exp - sign);
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/math-hyperbolic.js b/deps/v8/test/mjsunit/es6/math-hyperbolic.js
index 604448d89..c45a19c52 100644
--- a/deps/v8/test/mjsunit/harmony/math-hyperbolic.js
+++ b/deps/v8/test/mjsunit/es6/math-hyperbolic.js
@@ -60,7 +60,7 @@ function test_id(fun, rev, value) {
});
-[Math.sinh, Math.asinh, Math.cosh].forEach(function(fun) {
+[Math.sinh, Math.asinh].forEach(function(fun) {
assertEquals("-Infinity", String(fun(-Infinity)));
assertEquals("Infinity", String(fun(Infinity)));
assertEquals("-Infinity", String(fun("-Infinity")));
@@ -68,6 +68,12 @@ function test_id(fun, rev, value) {
});
+assertEquals("Infinity", String(Math.cosh(-Infinity)));
+assertEquals("Infinity", String(Math.cosh(Infinity)));
+assertEquals("Infinity", String(Math.cosh("-Infinity")));
+assertEquals("Infinity", String(Math.cosh("Infinity")));
+
+
assertEquals("-Infinity", String(Math.atanh(-1)));
assertEquals("Infinity", String(Math.atanh(1)));
diff --git a/deps/v8/test/mjsunit/harmony/math-hypot.js b/deps/v8/test/mjsunit/es6/math-hypot.js
index 105262721..105262721 100644
--- a/deps/v8/test/mjsunit/harmony/math-hypot.js
+++ b/deps/v8/test/mjsunit/es6/math-hypot.js
diff --git a/deps/v8/test/mjsunit/es6/math-log1p.js b/deps/v8/test/mjsunit/es6/math-log1p.js
new file mode 100644
index 000000000..eefea6ee3
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/math-log1p.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-maths
+
+assertTrue(isNaN(Math.log1p(NaN)));
+assertTrue(isNaN(Math.log1p(function() {})));
+assertTrue(isNaN(Math.log1p({ toString: function() { return NaN; } })));
+assertTrue(isNaN(Math.log1p({ valueOf: function() { return "abc"; } })));
+assertEquals("Infinity", String(1/Math.log1p(0)));
+assertEquals("-Infinity", String(1/Math.log1p(-0)));
+assertEquals("Infinity", String(Math.log1p(Infinity)));
+assertEquals("-Infinity", String(Math.log1p(-1)));
+assertTrue(isNaN(Math.log1p(-2)));
+assertTrue(isNaN(Math.log1p(-Infinity)));
+
+for (var x = 1E300; x > 1E-1; x *= 0.8) {
+ var expected = Math.log(x + 1);
+ assertEqualsDelta(expected, Math.log1p(x), expected * 1E-14);
+}
+
+// Values close to 0:
+// Use Taylor expansion at 1 for log(x) as test expectation:
+// log1p(x) == log(x + 1) == 0 + x / 1 - x^2 / 2 + x^3 / 3 - ...
+function log1p(x) {
+ var terms = [];
+ var prod = x;
+ for (var i = 1; i <= 20; i++) {
+ terms.push(prod / i);
+ prod *= -x;
+ }
+ var sum = 0;
+ while (terms.length > 0) sum += terms.pop();
+ return sum;
+}
+
+for (var x = 1E-1; x > 1E-300; x *= 0.8) {
+ var expected = log1p(x);
+ assertEqualsDelta(expected, Math.log1p(x), expected * 1E-14);
+}
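Likewise, the log1p helper above is the alternating Taylor series truncated at k = 20:

    \[ \log(1 + x) \;=\; \sum_{k=1}^{\infty} (-1)^{k+1} \frac{x^{k}}{k} \;=\; x - \frac{x^{2}}{2} + \frac{x^{3}}{3} - \cdots, \]

with the terms summed smallest-first (push, then pop) so rounding error stays small for tiny x.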
diff --git a/deps/v8/test/mjsunit/harmony/math-log2-log10.js b/deps/v8/test/mjsunit/es6/math-log2-log10.js
index 2ab496012..2ab496012 100644
--- a/deps/v8/test/mjsunit/harmony/math-log2-log10.js
+++ b/deps/v8/test/mjsunit/es6/math-log2-log10.js
diff --git a/deps/v8/test/mjsunit/harmony/math-sign.js b/deps/v8/test/mjsunit/es6/math-sign.js
index 8a89d6282..8a89d6282 100644
--- a/deps/v8/test/mjsunit/harmony/math-sign.js
+++ b/deps/v8/test/mjsunit/es6/math-sign.js
diff --git a/deps/v8/test/mjsunit/harmony/math-trunc.js b/deps/v8/test/mjsunit/es6/math-trunc.js
index ed91ed138..ed91ed138 100644
--- a/deps/v8/test/mjsunit/harmony/math-trunc.js
+++ b/deps/v8/test/mjsunit/es6/math-trunc.js
diff --git a/deps/v8/test/mjsunit/es6/microtask-delivery.js b/deps/v8/test/mjsunit/es6/microtask-delivery.js
new file mode 100644
index 000000000..f74385e63
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/microtask-delivery.js
@@ -0,0 +1,168 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var ordering = [];
+function reset() {
+ ordering = [];
+}
+
+function assertArrayValues(expected, actual) {
+ assertEquals(expected.length, actual.length);
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i], actual[i]);
+ }
+}
+
+function assertOrdering(expected) {
+ %RunMicrotasks();
+ assertArrayValues(expected, ordering);
+}
+
+function newPromise(id, fn) {
+ var r;
+ var t = 1;
+ var promise = new Promise(function(resolve) {
+ r = resolve;
+ if (fn) fn();
+ });
+
+ var next = promise.then(function(value) {
+ ordering.push('p' + id);
+ return value;
+ });
+
+ return {
+ resolve: r,
+ then: function(fn) {
+ next = next.then(function(value) {
+ ordering.push('p' + id + ':' + t++);
+ return fn ? fn(value) : value;
+ });
+
+ return this;
+ }
+ };
+}
+
+function newObserver(id, fn, obj) {
+ var observer = {
+ value: 1,
+ recordCounts: []
+ };
+
+ Object.observe(observer, function(records) {
+ ordering.push('o' + id);
+ observer.recordCounts.push(records.length);
+ if (fn) fn();
+ });
+
+ return observer;
+}
+
+
+(function PromiseThens() {
+ reset();
+
+ var p1 = newPromise(1).then();
+ var p2 = newPromise(2).then();
+
+ p1.resolve();
+ p2.resolve();
+
+ assertOrdering(['p1', 'p2', 'p1:1', 'p2:1']);
+})();
+
+
+(function ObserversBatch() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var p3 = newPromise(3);
+
+ var ob1 = newObserver(1);
+ var ob2 = newObserver(2, function() {
+ ob3.value++;
+ p3.resolve();
+ ob1.value++;
+ });
+ var ob3 = newObserver(3);
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1', 'o3', 'p3']);
+ assertArrayValues([1, 1], ob1.recordCounts);
+ assertArrayValues([1], ob2.recordCounts);
+ assertArrayValues([1], ob3.recordCounts);
+})();
+
+
+(function ObserversGetAllRecords() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var ob1 = newObserver(1, function() {
+ ob2.value++;
+ });
+ var ob2 = newObserver(2);
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2']);
+ assertArrayValues([1], ob1.recordCounts);
+ assertArrayValues([2], ob2.recordCounts);
+})();
+
+
+(function NewObserverDeliveryGetsNewMicrotask() {
+ reset();
+
+ var p1 = newPromise(1);
+ var p2 = newPromise(2);
+ var ob1 = newObserver(1);
+ var ob2 = newObserver(2, function() {
+ ob1.value++;
+ });
+
+ p1.resolve();
+ ob1.value++;
+ p2.resolve();
+ ob2.value++;
+
+ assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1']);
+ assertArrayValues([1, 1], ob1.recordCounts);
+ assertArrayValues([1], ob2.recordCounts);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 38ccd7fb2..96a7bbbf3 100644
--- a/deps/v8/test/mjsunit/harmony/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-promises --harmony-observation --allow-natives-syntax
+// Flags: --allow-natives-syntax
var asyncAssertsExpected = 0;
@@ -82,8 +82,8 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.resolve(5);
- Promise.resolve(5).chain(undefined, assertUnreachable).chain(
+ Promise.accept(5);
+ Promise.accept(5).chain(undefined, assertUnreachable).chain(
function(x) { assertAsync(x === 5, "resolved/chain-nohandler") },
assertUnreachable
)
@@ -99,8 +99,13 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.resolve(5).then(undefined, assertUnreachable).chain(
- function(x) { assertAsync(x === 5, "resolved/then-nohandler") },
+ Promise.accept(5).then(undefined, assertUnreachable).chain(
+ function(x) { assertAsync(x === 5, "resolved/then-nohandler-undefined") },
+ assertUnreachable
+ )
+ assertAsyncRan()
+ Promise.accept(6).then(null, assertUnreachable).chain(
+ function(x) { assertAsync(x === 6, "resolved/then-nohandler-null") },
assertUnreachable
)
assertAsyncRan()
@@ -109,15 +114,20 @@ function assertAsyncDone(iteration) {
(function() {
Promise.reject(5).then(assertUnreachable, undefined).chain(
assertUnreachable,
- function(r) { assertAsync(r === 5, "rejected/then-nohandler") }
+ function(r) { assertAsync(r === 5, "rejected/then-nohandler-undefined") }
+ )
+ assertAsyncRan()
+ Promise.reject(6).then(assertUnreachable, null).chain(
+ assertUnreachable,
+ function(r) { assertAsync(r === 6, "rejected/then-nohandler-null") }
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "resolved/chain") },
assertUnreachable
@@ -126,9 +136,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(
function(x) { assertAsync(x === 5, "resolved/then") },
assertUnreachable
@@ -138,8 +148,8 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "rejected/chain") },
assertUnreachable
@@ -149,8 +159,8 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "rejected/then") }
@@ -159,9 +169,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { return x }, assertUnreachable).chain(
function(x) { assertAsync(x === p1, "resolved/chain/chain") },
assertUnreachable
@@ -170,9 +180,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { return x }, assertUnreachable).then(
function(x) { assertAsync(x === 5, "resolved/chain/then") },
assertUnreachable
@@ -181,9 +191,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { return 6 }, assertUnreachable).chain(
function(x) { assertAsync(x === 6, "resolved/chain/chain2") },
assertUnreachable
@@ -192,9 +202,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { return 6 }, assertUnreachable).then(
function(x) { assertAsync(x === 6, "resolved/chain/then2") },
assertUnreachable
@@ -203,9 +213,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(function(x) { return x + 1 }, assertUnreachable).chain(
function(x) { assertAsync(x === 6, "resolved/then/chain") },
assertUnreachable
@@ -214,9 +224,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(function(x) { return x + 1 }, assertUnreachable).then(
function(x) { assertAsync(x === 6, "resolved/then/then") },
assertUnreachable
@@ -225,10 +235,10 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
- p3.then(function(x){ return Promise.resolve(x+1) }, assertUnreachable).chain(
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
+ p3.then(function(x){ return Promise.accept(x+1) }, assertUnreachable).chain(
function(x) { assertAsync(x === 6, "resolved/then/chain2") },
assertUnreachable
)
@@ -236,10 +246,10 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
- p3.then(function(x) { return Promise.resolve(x+1) }, assertUnreachable).then(
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
+ p3.then(function(x) { return Promise.accept(x+1) }, assertUnreachable).then(
function(x) { assertAsync(x === 6, "resolved/then/then2") },
assertUnreachable
)
@@ -247,9 +257,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { throw 6 }, assertUnreachable).chain(
assertUnreachable,
function(x) { assertAsync(x === 6, "resolved/chain-throw/chain") }
@@ -258,9 +268,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
function(x) { assertAsync(x === 6, "resolved/chain-throw/then") }
@@ -269,9 +279,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(function(x) { throw 6 }, assertUnreachable).chain(
assertUnreachable,
function(x) { assertAsync(x === 6, "resolved/then-throw/chain") }
@@ -280,9 +290,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
@@ -291,9 +301,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
+ var p1 = Promise.accept(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "resolved/thenable/chain") },
assertUnreachable
@@ -302,9 +312,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
+ var p1 = Promise.accept(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.then(
function(x) { assertAsync(x === 5, "resolved/thenable/then") },
assertUnreachable
@@ -315,7 +325,7 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "rejected/thenable/chain") },
assertUnreachable
@@ -326,7 +336,7 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "rejected/thenable/then") }
@@ -337,8 +347,8 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "chain/resolve") },
assertUnreachable
@@ -350,8 +360,8 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(
function(x) { assertAsync(x === 5, "then/resolve") },
assertUnreachable
@@ -363,8 +373,8 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "chain/reject") },
assertUnreachable
@@ -376,8 +386,8 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(p1)
- var p3 = Promise.resolve(p2)
+ var p2 = Promise.accept(p1)
+ var p3 = Promise.accept(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "then/reject") }
@@ -390,7 +400,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "chain/resolve/thenable") },
assertUnreachable
@@ -403,7 +413,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.then(
function(x) { assertAsync(x === 5, "then/resolve/thenable") },
assertUnreachable
@@ -416,7 +426,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.chain(
function(x) { assertAsync(x === p2, "chain/reject/thenable") },
assertUnreachable
@@ -429,7 +439,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.resolve(p2)
+ var p3 = Promise.accept(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "then/reject/thenable") }
@@ -439,8 +449,8 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
var deferred = Promise.defer()
var p3 = deferred.promise
p3.chain(
@@ -452,8 +462,8 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
var deferred = Promise.defer()
var p3 = deferred.promise
p3.then(
@@ -465,8 +475,8 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
var deferred = Promise.defer()
var p3 = deferred.promise
p3.chain(
@@ -478,8 +488,8 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
- var p2 = Promise.resolve(p1)
+ var p1 = Promise.accept(5)
+ var p2 = Promise.accept(p1)
var deferred = Promise.defer()
var p3 = deferred.promise
p3.then(
@@ -491,7 +501,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
+ var p1 = Promise.accept(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var deferred = Promise.defer()
var p3 = deferred.promise
@@ -504,7 +514,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(5)
+ var p1 = Promise.accept(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var deferred = Promise.defer()
var p3 = deferred.promise
@@ -517,7 +527,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(0)
+ var p1 = Promise.accept(0)
var p2 = p1.chain(function(x) { return p2 }, assertUnreachable)
p2.chain(
assertUnreachable,
@@ -527,7 +537,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(0)
+ var p1 = Promise.accept(0)
var p2 = p1.then(function(x) { return p2 }, assertUnreachable)
p2.chain(
assertUnreachable,
@@ -559,9 +569,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.all({get length() { throw 666 }}).chain(
+ Promise.all({}).chain(
assertUnreachable,
- function(r) { assertAsync(r === 666, "all/no-array") }
+ function(r) { assertAsync(r instanceof TypeError, "all/no-array") }
)
assertAsyncRan()
})();
@@ -602,7 +612,7 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(2)
+ var p2 = Promise.accept(2)
var p3 = Promise.defer().promise
Promise.all([p1, p2, p3]).chain(
assertUnreachable,
@@ -636,9 +646,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(1)
- var p2 = Promise.resolve(2)
- var p3 = Promise.resolve(3)
+ var p1 = Promise.accept(1)
+ var p2 = Promise.accept(2)
+ var p3 = Promise.accept(3)
Promise.race([p1, p2, p3]).chain(
function(x) { assertAsync(x === 1, "resolved/one") },
assertUnreachable
@@ -647,9 +657,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.resolve(1)
- var p2 = Promise.resolve(2)
- var p3 = Promise.resolve(3)
+ var p1 = Promise.accept(1)
+ var p2 = Promise.accept(2)
+ var p3 = Promise.accept(3)
Promise.race([0, p1, p2, p3]).chain(
function(x) { assertAsync(x === 0, "resolved-const/one") },
assertUnreachable
@@ -658,9 +668,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.race({get length() { throw 666 }}).chain(
+ Promise.race({}).chain(
assertUnreachable,
- function(r) { assertAsync(r === 666, "one/no-array") }
+ function(r) { assertAsync(r instanceof TypeError, "one/no-array") }
)
assertAsyncRan()
})();
@@ -684,7 +694,7 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.resolve(2)
+ var p2 = Promise.accept(2)
var p3 = Promise.defer().promise
Promise.race([p1, p2, p3]).chain(
function(x) { assertAsync(x === 2, "resolved/one") },
@@ -779,12 +789,12 @@ function assertAsyncDone(iteration) {
assertTrue(log === "dncncnx6", "subclass/chain")
log = ""
- Promise.all([11, Promise.resolve(12), 13, MyPromise.resolve(14), 15, 16])
- assertTrue(log === "nx14cn", "subclass/all/arg")
+ Promise.all([11, Promise.accept(12), 13, MyPromise.accept(14), 15, 16])
+ assertTrue(log === "nx14n", "subclass/all/arg")
log = ""
- MyPromise.all([21, Promise.resolve(22), 23, MyPromise.resolve(24), 25, 26])
- assertTrue(log === "nx24nnx21cnnx23cncnnx25cnnx26cn", "subclass/all/self")
+ MyPromise.all([21, Promise.accept(22), 23, MyPromise.accept(24), 25, 26])
+ assertTrue(log === "nx24nnx21nnx23nnnx25nnx26n", "subclass/all/self")
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-2034.js b/deps/v8/test/mjsunit/es6/regress/regress-2034.js
index c510f97fc..5c738bf84 100644
--- a/deps/v8/test/mjsunit/regress/regress-2034.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2034.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-collections
-
var key = {};
var map = new WeakMap;
Object.preventExtensions(key);
diff --git a/deps/v8/test/mjsunit/regress/regress-2156.js b/deps/v8/test/mjsunit/es6/regress/regress-2156.js
index 348257113..fba2a2986 100644
--- a/deps/v8/test/mjsunit/regress/regress-2156.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2156.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-collections
+// Flags: --allow-natives-syntax
var key1 = {};
var key2 = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-2829.js b/deps/v8/test/mjsunit/es6/regress/regress-2829.js
index a046ae039..b48039cf0 100644
--- a/deps/v8/test/mjsunit/regress/regress-2829.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2829.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-collections
-
(function test1() {
var wm1 = new WeakMap();
wm1.set(Object.prototype, 23);
diff --git a/deps/v8/test/mjsunit/es6/weak_collections.js b/deps/v8/test/mjsunit/es6/weak_collections.js
new file mode 100644
index 000000000..74235e7d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/weak_collections.js
@@ -0,0 +1,333 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+
+// Note: this test is superseded by harmony/collections.js.
+// IF YOU CHANGE THIS FILE, apply the same changes to harmony/collections.js!
+// TODO(rossberg): Remove once non-weak collections have caught up.
+
+// Test valid getter and setter calls on WeakSets.
+function TestValidSetCalls(m) {
+ assertDoesNotThrow(function () { m.add(new Object) });
+ assertDoesNotThrow(function () { m.has(new Object) });
+ assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidSetCalls(new WeakSet);
+
+
+// Test valid getter and setter calls on WeakMaps
+function TestValidMapCalls(m) {
+ assertDoesNotThrow(function () { m.get(new Object) });
+ assertDoesNotThrow(function () { m.set(new Object) });
+ assertDoesNotThrow(function () { m.has(new Object) });
+ assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidMapCalls(new WeakMap);
+
+
+// Test invalid getter and setter calls for WeakMap
+function TestInvalidCalls(m) {
+ assertThrows(function () { m.get(undefined) }, TypeError);
+ assertThrows(function () { m.set(undefined, 0) }, TypeError);
+ assertThrows(function () { m.get(null) }, TypeError);
+ assertThrows(function () { m.set(null, 0) }, TypeError);
+ assertThrows(function () { m.get(0) }, TypeError);
+ assertThrows(function () { m.set(0, 0) }, TypeError);
+ assertThrows(function () { m.get('a-key') }, TypeError);
+ assertThrows(function () { m.set('a-key', 0) }, TypeError);
+}
+TestInvalidCalls(new WeakMap);
+
+
+// Test expected behavior for WeakSets
+function TestSet(set, key) {
+ assertFalse(set.has(key));
+ assertSame(undefined, set.add(key));
+ assertTrue(set.has(key));
+ assertTrue(set.delete(key));
+ assertFalse(set.has(key));
+ assertFalse(set.delete(key));
+ assertFalse(set.has(key));
+}
+function TestSetBehavior(set) {
+ for (var i = 0; i < 20; i++) {
+ TestSet(set, new Object);
+ TestSet(set, i);
+ TestSet(set, i / 100);
+ TestSet(set, 'key-' + i);
+ }
+ var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+ for (var i = 0; i < keys.length; i++) {
+ TestSet(set, keys[i]);
+ }
+}
+TestSet(new WeakSet, new Object);
+
+
+// Test expected mapping behavior for WeakMaps
+function TestMapping(map, key, value) {
+ assertSame(undefined, map.set(key, value));
+ assertSame(value, map.get(key));
+}
+function TestMapBehavior1(m) {
+ TestMapping(m, new Object, 23);
+ TestMapping(m, new Object, 'the-value');
+ TestMapping(m, new Object, new Object);
+}
+TestMapBehavior1(new WeakMap);
+
+
+// Test expected querying behavior of WeakMaps
+function TestQuery(m) {
+ var key = new Object;
+ var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
+ for (var i = 0; i < values.length; i++) {
+ TestMapping(m, key, values[i]);
+ assertTrue(m.has(key));
+ assertFalse(m.has(new Object));
+ }
+}
+TestQuery(new WeakMap);
+
+
+// Test expected deletion behavior of WeakMaps
+function TestDelete(m) {
+ var key = new Object;
+ TestMapping(m, key, 'to-be-deleted');
+ assertTrue(m.delete(key));
+ assertFalse(m.delete(key));
+ assertFalse(m.delete(new Object));
+ assertSame(m.get(key), undefined);
+}
+TestDelete(new WeakMap);
+
+
+// Test GC of WeakMaps with entry
+function TestGC1(m) {
+ var key = new Object;
+ m.set(key, 'not-collected');
+ gc();
+ assertSame('not-collected', m.get(key));
+}
+TestGC1(new WeakMap);
+
+
+// Test GC of WeakMaps with chained entries
+function TestGC2(m) {
+ var head = new Object;
+ for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
+ m.set(key, new Object);
+ }
+ gc();
+ var count = 0;
+ for (key = head; key != undefined; key = m.get(key)) {
+ count++;
+ }
+ assertEquals(11, count);
+}
+TestGC2(new WeakMap);
+
+
+// Test property attribute [[Enumerable]]
+function TestEnumerable(func) {
+ function props(x) {
+ var array = [];
+ for (var p in x) array.push(p);
+ return array.sort();
+ }
+ assertArrayEquals([], props(func));
+ assertArrayEquals([], props(func.prototype));
+ assertArrayEquals([], props(new func()));
+}
+TestEnumerable(WeakMap);
+TestEnumerable(WeakSet);
+
+
+// Test arbitrary properties on WeakMaps
+function TestArbitrary(m) {
+ function TestProperty(map, property, value) {
+ map[property] = value;
+ assertEquals(value, map[property]);
+ }
+ for (var i = 0; i < 20; i++) {
+ TestProperty(m, i, 'val' + i);
+ TestProperty(m, 'foo' + i, 'bar' + i);
+ }
+ TestMapping(m, new Object, 'foobar');
+}
+TestArbitrary(new WeakMap);
+
+
+// Test direct constructor call
+assertThrows(function() { WeakMap(); }, TypeError);
+assertThrows(function() { WeakSet(); }, TypeError);
+
+
+// Test some common JavaScript idioms for WeakMaps
+var m = new WeakMap;
+assertTrue(m instanceof WeakMap);
+assertTrue(WeakMap.prototype.set instanceof Function)
+assertTrue(WeakMap.prototype.get instanceof Function)
+assertTrue(WeakMap.prototype.has instanceof Function)
+assertTrue(WeakMap.prototype.delete instanceof Function)
+assertTrue(WeakMap.prototype.clear instanceof Function)
+
+
+// Test some common JavaScript idioms for WeakSets
+var s = new WeakSet;
+assertTrue(s instanceof WeakSet);
+assertTrue(WeakSet.prototype.add instanceof Function)
+assertTrue(WeakSet.prototype.has instanceof Function)
+assertTrue(WeakSet.prototype.delete instanceof Function)
+assertTrue(WeakSet.prototype.clear instanceof Function)
+
+
+// Test class of instance and prototype.
+assertEquals("WeakMap", %_ClassOf(new WeakMap))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+assertEquals("WeakSet", %_ClassOf(new WeakSet))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+
+
+// Test name of constructor.
+assertEquals("WeakMap", WeakMap.name);
+assertEquals("WeakSet", WeakSet.name);
+
+
+// Test prototype property of WeakMap and WeakSet.
+function TestPrototype(C) {
+ assertTrue(C.prototype instanceof Object);
+ assertEquals({
+ value: {},
+ writable: false,
+ enumerable: false,
+ configurable: false
+ }, Object.getOwnPropertyDescriptor(C, "prototype"));
+}
+TestPrototype(WeakMap);
+TestPrototype(WeakSet);
+
+
+// Test constructor property of the WeakMap and WeakSet prototype.
+function TestConstructor(C) {
+ assertFalse(C === Object.prototype.constructor);
+ assertSame(C, C.prototype.constructor);
+ assertSame(C, (new C).__proto__.constructor);
+}
+TestConstructor(WeakMap);
+TestConstructor(WeakSet);
+
+
+// Test the WeakMap and WeakSet global properties themselves.
+function TestDescriptor(global, C) {
+ assertEquals({
+ value: C,
+ writable: true,
+ enumerable: false,
+ configurable: true
+ }, Object.getOwnPropertyDescriptor(global, C.name));
+}
+TestDescriptor(this, WeakMap);
+TestDescriptor(this, WeakSet);
+
+
+// Regression test for WeakMap prototype.
+assertTrue(WeakMap.prototype.constructor === WeakMap)
+assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
+
+
+// Regression test for issue 1617: The prototype of the WeakMap constructor
+// needs to be unique (i.e. different from the one of the Object constructor).
+assertFalse(WeakMap.prototype === Object.prototype);
+var o = Object.create({});
+assertFalse("get" in o);
+assertFalse("set" in o);
+assertEquals(undefined, o.get);
+assertEquals(undefined, o.set);
+var o = Object.create({}, { myValue: {
+ value: 10,
+ enumerable: false,
+ configurable: true,
+ writable: true
+}});
+assertEquals(10, o.myValue);
+
+
+// Regression test for issue 1884: Invoking any of the methods for Harmony
+// maps, sets, or weak maps, with a wrong type of receiver should be throwing
+// a proper TypeError.
+var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
+var bogusReceiversTestSet = [
+ { proto: WeakMap.prototype,
+ funcs: [ 'get', 'set', 'has', 'delete' ],
+ receivers: alwaysBogus.concat([ new WeakSet ]),
+ },
+ { proto: WeakSet.prototype,
+ funcs: [ 'add', 'has', 'delete' ],
+ receivers: alwaysBogus.concat([ new WeakMap ]),
+ },
+];
+function TestBogusReceivers(testSet) {
+ for (var i = 0; i < testSet.length; i++) {
+ var proto = testSet[i].proto;
+ var funcs = testSet[i].funcs;
+ var receivers = testSet[i].receivers;
+ for (var j = 0; j < funcs.length; j++) {
+ var func = proto[funcs[j]];
+ for (var k = 0; k < receivers.length; k++) {
+ assertThrows(function () { func.call(receivers[k], {}) }, TypeError);
+ }
+ }
+ }
+}
+TestBogusReceivers(bogusReceiversTestSet);
+
+
+// Test WeakMap clear
+(function() {
+ var k = new Object();
+ var w = new WeakMap();
+ w.set(k, 23);
+ assertTrue(w.has(k));
+ assertEquals(23, w.get(k));
+ w.clear();
+ assertFalse(w.has(k));
+ assertEquals(undefined, w.get(k));
+})();
+
+
+// Test WeakSet clear
+(function() {
+ var k = new Object();
+ var w = new WeakSet();
+ w.add(k);
+ assertTrue(w.has(k));
+ w.clear();
+ assertFalse(w.has(k));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
index fb15a1fa8..f5e84a628 100644
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ b/deps/v8/test/mjsunit/es7/object-observe.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-observation --harmony-proxies --harmony-collections
+// Flags: --harmony-proxies --harmony-collections
// Flags: --harmony-symbols --allow-natives-syntax
var allObservers = [];
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index ab5435e5d..108b9f2e6 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -530,6 +530,7 @@ assertThrows(function() { ArrayBuffer.apply(null, [1000]); }, TypeError);
assertThrows(function() { Float32Array.apply(null, [b, 128, 1]); }, TypeError);
// Test array.set in different combinations.
+var b = new ArrayBuffer(4)
function assertArrayPrefix(expected, array) {
for (var i = 0; i < expected.length; ++i) {
diff --git a/deps/v8/test/mjsunit/function-arguments-duplicate.js b/deps/v8/test/mjsunit/function-arguments-duplicate.js
new file mode 100644
index 000000000..80f03a106
--- /dev/null
+++ b/deps/v8/test/mjsunit/function-arguments-duplicate.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Exercises ArgumentsAccessStub::GenerateNewNonStrictSlow.
+
+function f(a, a) {
+ assertEquals(2, a);
+ assertEquals(1, arguments[0]);
+ assertEquals(2, arguments[1]);
+}
+
+f(1, 2);
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part1.js b/deps/v8/test/mjsunit/fuzz-natives-part1.js
index e22ac4947..0bd0dc1ab 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part1.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part1.js
@@ -116,7 +116,6 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
- "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -141,33 +140,8 @@ var knownProblems = {
"DisableAccessChecks": true,
"EnableAccessChecks": true,
- // These functions should not be callable as runtime functions.
- "NewFunctionContext": true,
- "NewArgumentsFast": true,
- "NewStrictArgumentsFast": true,
- "PushWithContext": true,
- "PushCatchContext": true,
- "PushBlockContext": true,
- "PushModuleContext": true,
- "CompileUnoptimized": true,
- "CompileOptimized": true,
- "CompileOptimizedConcurrent": true,
- "NotifyDeoptimized": true,
- "NotifyStubFailure": true,
- "NotifyOSR": true,
- "CreateObjectLiteralBoilerplate": true,
- "CloneLiteralBoilerplate": true,
- "CloneShallowLiteralBoilerplate": true,
- "CreateArrayLiteralBoilerplate": true,
+ // IS_VAR is special.
"IS_VAR": true,
- "ResolvePossiblyDirectEval": true,
- "Log": true,
- "DeclareGlobals": true,
- "ArrayConstructor": true,
- "InternalArrayConstructor": true,
-
- "PromoteScheduledException": true,
- "DeleteHandleScopeExtensions": true,
// Vararg with minimum number > 0.
"Call": true,
@@ -206,17 +180,14 @@ var knownProblems = {
"_TwoByteSeqStringSetChar": true,
// Only applicable to TypedArrays.
- "TypedArrayInitialize": true,
+ "_TypedArrayInitialize": true,
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
- "DataViewInitialize": true,
- "DataViewGetBuffer": true,
- "DataViewGetByteLength": true,
- "DataViewGetByteOffset": true
+ "_DataViewInitialize": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part2.js b/deps/v8/test/mjsunit/fuzz-natives-part2.js
index 293ad7e52..103e13291 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part2.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part2.js
@@ -116,7 +116,6 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
- "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -141,34 +140,8 @@ var knownProblems = {
"DisableAccessChecks": true,
"EnableAccessChecks": true,
- // These functions should not be callable as runtime functions.
- "NewFunctionContext": true,
- "NewArgumentsFast": true,
- "NewStrictArgumentsFast": true,
- "PushWithContext": true,
- "PushCatchContext": true,
- "PushBlockContext": true,
- "PushModuleContext": true,
- "CompileUnoptimized": true,
- "CompileOptimized": true,
- "CompileOptimizedConcurrent": true,
- "NotifyDeoptimized": true,
- "NotifyStubFailure": true,
- "NotifyOSR": true,
- "CreateObjectLiteralBoilerplate": true,
- "CloneLiteralBoilerplate": true,
- "CloneShallowLiteralBoilerplate": true,
- "CreateArrayLiteralBoilerplate": true,
+ // IS_VAR is special.
"IS_VAR": true,
- "ResolvePossiblyDirectEval": true,
- "Log": true,
- "DeclareGlobals": true,
- "ArrayConstructor": true,
- "InternalArrayConstructor": true,
- "SetAccessorProperty": true,
-
- "PromoteScheduledException": true,
- "DeleteHandleScopeExtensions": true,
// Vararg with minimum number > 0.
"Call": true,
@@ -207,17 +180,14 @@ var knownProblems = {
"_TwoByteSeqStringSetChar": true,
// Only applicable to TypedArrays.
- "TypedArrayInitialize": true,
+ "_TypedArrayInitialize": true,
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
- "DataViewInitialize": true,
- "DataViewGetBuffer": true,
- "DataViewGetByteLength": true,
- "DataViewGetByteOffset": true
+ "_DataViewInitialize": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part3.js b/deps/v8/test/mjsunit/fuzz-natives-part3.js
index ba51b3db7..7a8125a73 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part3.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part3.js
@@ -116,7 +116,6 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
- "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -141,33 +140,8 @@ var knownProblems = {
"DisableAccessChecks": true,
"EnableAccessChecks": true,
- // These functions should not be callable as runtime functions.
- "NewFunctionContext": true,
- "NewArgumentsFast": true,
- "NewStrictArgumentsFast": true,
- "PushWithContext": true,
- "PushCatchContext": true,
- "PushBlockContext": true,
- "PushModuleContext": true,
- "CompileUnoptimized": true,
- "CompileOptimized": true,
- "CompileOptimizedConcurrent": true,
- "NotifyDeoptimized": true,
- "NotifyStubFailure": true,
- "NotifyOSR": true,
- "CreateObjectLiteralBoilerplate": true,
- "CloneLiteralBoilerplate": true,
- "CloneShallowLiteralBoilerplate": true,
- "CreateArrayLiteralBoilerplate": true,
+ // IS_VAR is special.
"IS_VAR": true,
- "ResolvePossiblyDirectEval": true,
- "Log": true,
- "DeclareGlobals": true,
- "ArrayConstructor": true,
- "InternalArrayConstructor": true,
-
- "PromoteScheduledException": true,
- "DeleteHandleScopeExtensions": true,
// Vararg with minimum number > 0.
"Call": true,
@@ -206,17 +180,14 @@ var knownProblems = {
"_TwoByteSeqStringSetChar": true,
// Only applicable to TypedArrays.
- "TypedArrayInitialize": true,
+ "_TypedArrayInitialize": true,
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
- "DataViewInitialize":true,
- "DataViewGetBuffer": true,
- "DataViewGetByteLength": true,
- "DataViewGetByteOffset": true
+ "_DataViewInitialize": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part4.js b/deps/v8/test/mjsunit/fuzz-natives-part4.js
index 5f1f91206..952374925 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part4.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part4.js
@@ -116,7 +116,6 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
- "ThrowMessage": true,
// Avoid calling the concat operation, because weird lengths
// may lead to out-of-memory. Ditto for StringBuilderJoin.
@@ -141,33 +140,8 @@ var knownProblems = {
"DisableAccessChecks": true,
"EnableAccessChecks": true,
- // These functions should not be callable as runtime functions.
- "NewFunctionContext": true,
- "NewArgumentsFast": true,
- "NewStrictArgumentsFast": true,
- "PushWithContext": true,
- "PushCatchContext": true,
- "PushBlockContext": true,
- "PushModuleContext": true,
- "CompileUnoptimized": true,
- "CompileOptimized": true,
- "CompileOptimizedConcurrent": true,
- "NotifyDeoptimized": true,
- "NotifyStubFailure": true,
- "NotifyOSR": true,
- "CreateObjectLiteralBoilerplate": true,
- "CloneLiteralBoilerplate": true,
- "CloneShallowLiteralBoilerplate": true,
- "CreateArrayLiteralBoilerplate": true,
+ // IS_VAR is special.
"IS_VAR": true,
- "ResolvePossiblyDirectEval": true,
- "Log": true,
- "DeclareGlobals": true,
- "ArrayConstructor": true,
- "InternalArrayConstructor": true,
-
- "PromoteScheduledException": true,
- "DeleteHandleScopeExtensions": true,
// Vararg with minimum number > 0.
"Call": true,
@@ -206,17 +180,14 @@ var knownProblems = {
"_TwoByteSeqStringSetChar": true,
// Only applicable to TypedArrays.
- "TypedArrayInitialize": true,
+ "_TypedArrayInitialize": true,
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
// Only applicable to DataViews.
- "DataViewInitialize": true,
- "DataViewGetBuffer": true,
- "DataViewGetByteLength": true,
- "DataViewGetByteOffset": true
+ "_DataViewInitialize": true,
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/getters-on-elements.js b/deps/v8/test/mjsunit/getters-on-elements.js
index 55fc86b84..3bc360f14 100644
--- a/deps/v8/test/mjsunit/getters-on-elements.js
+++ b/deps/v8/test/mjsunit/getters-on-elements.js
@@ -26,10 +26,17 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --max-opt-count=100 --noalways-opt
+// Flags: --nocollect-maps
// We specify max-opt-count because we opt/deopt the same function many
// times.
+// We specify nocollect-maps because in gcstress we can end up deoptimizing
+// a function in a gc in the stack guard at the beginning of the (optimized)
+// function due to leftover map clearing work that results in deoptimizing
+// dependent code from those maps. The choice is to insert strategic gc()
+// calls or specify this flag.
+
// It's nice to run this in other browsers too.
var standalone = false;
if (standalone) {
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration.js b/deps/v8/test/mjsunit/harmony/block-let-declaration.js
index 480e03348..4ddeefdba 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-declaration.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-declaration.js
@@ -109,9 +109,9 @@ TestLocalDoesNotThrow("switch (true) { case true: var x; }");
TestLocalDoesNotThrow("switch (true) { default: var x; }");
// Test function declarations in source element and
-// non-strict statement positions.
+// sloppy statement positions.
function f() {
- // Non-strict source element positions.
+ // Sloppy source element positions.
function g0() {
"use strict";
// Strict source element positions.
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index 7e95b9e11..804a320f3 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -25,10 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-collections --expose-gc --allow-natives-syntax
+// Flags: --harmony-collections
+// Flags: --expose-gc --allow-natives-syntax
-// Test valid getter and setter calls on Sets.
+// Test valid getter and setter calls on Sets and WeakSets
function TestValidSetCalls(m) {
assertDoesNotThrow(function () { m.add(new Object) });
assertDoesNotThrow(function () { m.has(new Object) });
@@ -63,7 +64,7 @@ function TestInvalidCalls(m) {
TestInvalidCalls(new WeakMap);
-// Test expected behavior for Sets
+// Test expected behavior for Sets and WeakSets
function TestSet(set, key) {
assertFalse(set.has(key));
assertSame(undefined, set.add(key));
@@ -289,19 +290,20 @@ assertEquals("WeakSet", WeakSet.name);
// Test prototype property of Set, Map, WeakMap and WeakSet.
-function TestPrototype(C) {
+// TODO(2793): Should all be non-writable, and the extra flag removed.
+function TestPrototype(C, writable) {
assertTrue(C.prototype instanceof Object);
assertEquals({
value: {},
- writable: true, // TODO(2793): This should be non-writable.
+ writable: writable,
enumerable: false,
configurable: false
}, Object.getOwnPropertyDescriptor(C, "prototype"));
}
-TestPrototype(Set);
-TestPrototype(Map);
-TestPrototype(WeakMap);
-TestPrototype(WeakSet);
+TestPrototype(Set, true);
+TestPrototype(Map, true);
+TestPrototype(WeakMap, false);
+TestPrototype(WeakSet, false);
// Test constructor property of the Set, Map, WeakMap and WeakSet prototype.
diff --git a/deps/v8/test/mjsunit/harmony/generators-objects.js b/deps/v8/test/mjsunit/harmony/generators-objects.js
index bb29bed00..c1cda07db 100644
--- a/deps/v8/test/mjsunit/harmony/generators-objects.js
+++ b/deps/v8/test/mjsunit/harmony/generators-objects.js
@@ -55,7 +55,7 @@ function TestGeneratorObject() {
var iter = g();
assertSame(g.prototype, Object.getPrototypeOf(iter));
assertTrue(iter instanceof g);
- assertEquals("Generator", %ClassOf(iter));
+ assertEquals("Generator", %_ClassOf(iter));
assertEquals("[object Generator]", String(iter));
assertEquals([], Object.getOwnPropertyNames(iter));
assertTrue(iter !== g());
@@ -64,7 +64,7 @@ function TestGeneratorObject() {
iter = new g();
assertSame(g.prototype, Object.getPrototypeOf(iter));
assertTrue(iter instanceof g);
- assertEquals("Generator", %ClassOf(iter));
+ assertEquals("Generator", %_ClassOf(iter));
assertEquals("[object Generator]", String(iter));
assertEquals([], Object.getOwnPropertyNames(iter));
assertTrue(iter !== new g());
diff --git a/deps/v8/test/mjsunit/harmony/generators-parsing.js b/deps/v8/test/mjsunit/harmony/generators-parsing.js
index 941fa202c..2a4a68c37 100644
--- a/deps/v8/test/mjsunit/harmony/generators-parsing.js
+++ b/deps/v8/test/mjsunit/harmony/generators-parsing.js
@@ -68,7 +68,7 @@ function* yield() { (yield 3) + (yield 4); }
assertThrows("function* yield() { \"use strict\"; (yield 3) + (yield 4); }",
SyntaxError);
-// In classic mode, yield is a normal identifier, outside of generators.
+// In sloppy mode, yield is a normal identifier, outside of generators.
function yield(yield) { yield: yield (yield + yield (0)); }
// Yield is always valid as a key in an object literal.
@@ -76,7 +76,7 @@ function yield(yield) { yield: yield (yield + yield (0)); }
function* g() { yield ({ yield: 1 }) }
function* g() { yield ({ get yield() { return 1; }}) }
-// Checks that yield is a valid label in classic mode, but not valid in a strict
+// Checks that yield is a valid label in sloppy mode, but not valid in a strict
// mode or in generators.
function f() { yield: 1 }
assertThrows("function f() { \"use strict\"; yield: 1 }", SyntaxError)
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
index 09cf7f740..225799831 100644
--- a/deps/v8/test/mjsunit/harmony/private.js
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -30,6 +30,16 @@
var symbols = []
+
+// Returns true if the string is a valid
+// serialization of Symbols added to the 'symbols'
+// array. Adjust if you extend 'symbols' with other
+// values.
+function isValidSymbolString(s) {
+ return ["Symbol(66)"].indexOf(s) >= 0;
+}
+
+
// Test different forms of constructor calls, all equivalent.
function TestNew() {
for (var i = 0; i < 2; ++i) {
@@ -49,7 +59,6 @@ function TestType() {
assertTrue(typeof symbols[i] === "symbol")
assertTrue(%SymbolIsPrivate(symbols[i]))
assertEquals(null, %_ClassOf(symbols[i]))
- assertEquals("Symbol", %_ClassOf(new Symbol(symbols[i])))
assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
}
}
@@ -67,28 +76,21 @@ TestPrototype()
function TestConstructor() {
for (var i in symbols) {
assertSame(Symbol, symbols[i].__proto__.constructor)
+ assertSame(Symbol, Object(symbols[i]).__proto__.constructor)
}
}
TestConstructor()
-function TestName() {
- for (var i in symbols) {
- var name = symbols[i].name
- assertTrue(name === "66")
- }
-}
-TestName()
-
-
function TestToString() {
for (var i in symbols) {
assertThrows(function() { String(symbols[i]) }, TypeError)
assertThrows(function() { symbols[i] + "" }, TypeError)
- assertThrows(function() { symbols[i].toString() }, TypeError)
- assertThrows(function() { (new Symbol(symbols[i])).toString() }, TypeError)
- assertThrows(function() { Object(symbols[i]).toString() }, TypeError)
- assertEquals("[object Symbol]", Object.prototype.toString.call(symbols[i]))
+ assertTrue(isValidSymbolString(symbols[i].toString()))
+ assertTrue(isValidSymbolString(Object(symbols[i]).toString()))
+ assertTrue(isValidSymbolString(Symbol.prototype.toString.call(symbols[i])))
+ assertEquals(
+ "[object Symbol]", Object.prototype.toString.call(symbols[i]))
}
}
TestToString()
@@ -128,10 +130,14 @@ function TestEquality() {
assertTrue(Object.is(symbols[i], symbols[i]))
assertTrue(symbols[i] === symbols[i])
assertTrue(symbols[i] == symbols[i])
- assertFalse(symbols[i] === new Symbol(symbols[i]))
- assertFalse(new Symbol(symbols[i]) === symbols[i])
- assertTrue(symbols[i] == new Symbol(symbols[i]))
- assertTrue(new Symbol(symbols[i]) == symbols[i])
+ assertFalse(symbols[i] === Object(symbols[i]))
+ assertFalse(Object(symbols[i]) === symbols[i])
+ assertFalse(symbols[i] == Object(symbols[i]))
+ assertFalse(Object(symbols[i]) == symbols[i])
+ assertTrue(symbols[i] === symbols[i].valueOf())
+ assertTrue(symbols[i].valueOf() === symbols[i])
+ assertTrue(symbols[i] == symbols[i].valueOf())
+ assertTrue(symbols[i].valueOf() == symbols[i])
}
// All symbols should be distinct.
@@ -159,7 +165,7 @@ TestEquality()
function TestGet() {
for (var i in symbols) {
- assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertTrue(isValidSymbolString(symbols[i].toString()))
assertEquals(symbols[i], symbols[i].valueOf())
assertEquals(undefined, symbols[i].a)
assertEquals(undefined, symbols[i]["a" + "b"])
@@ -173,7 +179,7 @@ TestGet()
function TestSet() {
for (var i in symbols) {
symbols[i].toString = 0
- assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertTrue(isValidSymbolString(symbols[i].toString()))
symbols[i].valueOf = 0
assertEquals(symbols[i], symbols[i].valueOf())
symbols[i].a = 0
@@ -322,3 +328,17 @@ function TestCachedKeyAfterScavenge() {
}
}
TestCachedKeyAfterScavenge();
+
+
+function TestGetOwnPropertySymbols() {
+ var privateSymbol = %CreatePrivateSymbol("private")
+ var publicSymbol = Symbol()
+ var publicSymbol2 = Symbol()
+ var obj = {}
+ obj[publicSymbol] = 1
+ obj[privateSymbol] = 2
+ obj[publicSymbol2] = 3
+ var syms = Object.getOwnPropertySymbols(obj)
+ assertEquals(syms, [publicSymbol, publicSymbol2])
+}
+TestGetOwnPropertySymbols()
diff --git a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js b/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
index 9e2228a63..a645a6603 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
@@ -72,7 +72,7 @@ function createHandler(obj) {
hasOwn: function(name) { return ({}).hasOwnProperty.call(obj, name); },
get: function(receiver, name) { return obj[name]; },
set: function(receiver, name, val) {
- obj[name] = val; // bad behavior when set fails in non-strict mode
+ obj[name] = val; // bad behavior when set fails in sloppy mode
return true;
},
enumerate: function() {
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
index 7b07d4242..c024cef94 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-function.js
@@ -707,7 +707,7 @@ function TestCalls() {
function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
]
var receivers = [o, global_object, undefined, null, 2, "bla", true]
- var expectedNonStricts = [o, global_object, global_object, global_object]
+ var expectedSloppies = [o, global_object, global_object, global_object]
for (var t = 0; t < traps.length; ++t) {
for (var i = 0; i < creates.length; ++i) {
@@ -719,7 +719,7 @@ function TestCalls() {
var receiver = receivers[n]
var func = binds[j](creates[i](traps[t]), bound, 31, 11)
var expected = j > 0 ? bound : receiver
- var expectedNonStrict = expectedNonStricts[j > 0 ? m : n]
+ var expectedSloppy = expectedSloppies[j > 0 ? m : n]
o.f = func
global_object.f = func
var x = calls[k](func, 11, 31, receiver)
@@ -729,10 +729,10 @@ function TestCalls() {
assertSame(x.strict ? undefined : global_object, x.receiver)
else if (x.strict)
assertSame(expected, x.receiver)
- else if (expectedNonStrict === undefined)
+ else if (expectedSloppy === undefined)
assertSame(expected, x.receiver.valueOf())
else
- assertSame(expectedNonStrict, x.receiver)
+ assertSame(expectedSloppy, x.receiver)
}
}
}
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index f68e3bd15..00e605f8d 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -25,7 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
+// We change the stack size for the ARM64 simulator because at one point this
+// test enters an infinite recursion which goes through the runtime and we
+// overflow the system stack before the simulator stack.
+
+// Flags: --harmony-proxies --sim-stack-size=500
// Helper.
diff --git a/deps/v8/test/mjsunit/regress/regress-173361.js b/deps/v8/test/mjsunit/harmony/regress/regress-173361.js
index f9cfb6684..f9cfb6684 100644
--- a/deps/v8/test/mjsunit/regress/regress-173361.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-173361.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2186.js b/deps/v8/test/mjsunit/harmony/regress/regress-2186.js
index 0921dcead..0921dcead 100644
--- a/deps/v8/test/mjsunit/regress/regress-2186.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2186.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2219.js b/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
index 946c75bd8..946c75bd8 100644
--- a/deps/v8/test/mjsunit/regress/regress-2219.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2225.js b/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
index 9957d8d46..9957d8d46 100644
--- a/deps/v8/test/mjsunit/regress/regress-2225.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2243.js b/deps/v8/test/mjsunit/harmony/regress/regress-2243.js
index 31c2e55fe..31c2e55fe 100644
--- a/deps/v8/test/mjsunit/regress/regress-2243.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2243.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2322.js b/deps/v8/test/mjsunit/harmony/regress/regress-2322.js
index 1195bab67..1195bab67 100644
--- a/deps/v8/test/mjsunit/regress/regress-2322.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2322.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2681.js b/deps/v8/test/mjsunit/harmony/regress/regress-2681.js
index 9841d8484..9841d8484 100644
--- a/deps/v8/test/mjsunit/regress/regress-2681.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2681.js
diff --git a/deps/v8/test/mjsunit/regress/regress-2691.js b/deps/v8/test/mjsunit/harmony/regress/regress-2691.js
index e17be1081..e17be1081 100644
--- a/deps/v8/test/mjsunit/regress/regress-2691.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2691.js
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-343928.js b/deps/v8/test/mjsunit/harmony/regress/regress-343928.js
new file mode 100644
index 000000000..b102ab9c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-343928.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony --expose-debug-as=debug
+
+(function () { // Scope for utility functions.
+ escaping_function = function(object) {
+ // Argument must not be null or undefined.
+ var string = Object.prototype.toString.call(object);
+ // String has format [object <ClassName>].
+ return string.substring(8, string.length - 1);
+ }
+})();
+
+module B {
+ var stuff = 3
+}
+
+var __v_0 = {};
+var __v_4 = debug.MakeMirror(__v_0);
+print(__v_4.referencedBy().length); // core dump here if not fixed.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-248025.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-248025.js
index c59885956..c59885956 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-248025.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-248025.js
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-346141.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-346141.js
new file mode 100644
index 000000000..798b7704e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-346141.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-symbols
+
+var s = Symbol()
+var o = {}
+o[s] = 2
+o[""] = 3
+Object.getOwnPropertySymbols(o)
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js
new file mode 100644
index 000000000..e4e8efbc9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --harmony
+
+"use strict";
+let unused_var = 1;
+function __f_12() { new Array(); }
+__f_12();
+__f_12();
+%OptimizeFunctionOnNextCall(__f_12);
+__f_12();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js b/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
new file mode 100644
index 000000000..9b3293930
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --expose-gc
+
+var proxy = Proxy.create({ getPropertyDescriptor:function(key) {
+ gc();
+}});
+
+function f() { this.x = 23; }
+f.prototype = proxy;
+new f();
+new f();
diff --git a/deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js b/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
index 4b651694a..301ece70f 100644
--- a/deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-observation --allow-natives-syntax
+// Flags: --allow-natives-syntax
//
// Test passes if it does not crash.
diff --git a/deps/v8/test/mjsunit/set-prototype-of.js b/deps/v8/test/mjsunit/harmony/set-prototype-of.js
index 02bd5e2ee..02bd5e2ee 100644
--- a/deps/v8/test/mjsunit/set-prototype-of.js
+++ b/deps/v8/test/mjsunit/harmony/set-prototype-of.js
diff --git a/deps/v8/test/mjsunit/harmony/symbols.js b/deps/v8/test/mjsunit/harmony/symbols.js
index ce02a05ac..220439291 100644
--- a/deps/v8/test/mjsunit/harmony/symbols.js
+++ b/deps/v8/test/mjsunit/harmony/symbols.js
@@ -30,27 +30,35 @@
var symbols = []
-// Test different forms of constructor calls, all equivalent.
+
+// Returns true if the string is a valid
+// serialization of Symbols added to the 'symbols'
+// array. Adjust if you extend 'symbols' with other
+// values.
+function isValidSymbolString(s) {
+ return ["Symbol(66)", "Symbol()"].indexOf(s) >= 0;
+}
+
+
+// Test different forms of constructor calls.
function TestNew() {
- function IndirectSymbol() { return new Symbol }
- function indirect() { return new IndirectSymbol() }
+ function indirectSymbol() { return Symbol() }
+ function indirect() { return indirectSymbol() }
for (var i = 0; i < 2; ++i) {
for (var j = 0; j < 5; ++j) {
symbols.push(Symbol())
symbols.push(Symbol(undefined))
symbols.push(Symbol("66"))
symbols.push(Symbol(66))
- symbols.push(Symbol(Symbol()))
- symbols.push((new Symbol).valueOf())
- symbols.push((new Symbol()).valueOf())
- symbols.push((new Symbol(Symbol())).valueOf())
- symbols.push(Object(Symbol()).valueOf())
- symbols.push((indirect()).valueOf())
+ symbols.push(Symbol().valueOf())
+ symbols.push(indirect())
}
%OptimizeFunctionOnNextCall(indirect)
indirect() // Call once before GC throws away type feedback.
gc() // Promote existing symbols and then allocate some more.
}
+ assertThrows(function () { Symbol(Symbol()) }, TypeError)
+ assertThrows(function () { new Symbol(66) }, TypeError)
}
TestNew()
@@ -61,7 +69,6 @@ function TestType() {
assertTrue(typeof symbols[i] === "symbol")
assertFalse(%SymbolIsPrivate(symbols[i]))
assertEquals(null, %_ClassOf(symbols[i]))
- assertEquals("Symbol", %_ClassOf(new Symbol(symbols[i])))
assertEquals("Symbol", %_ClassOf(Object(symbols[i])))
}
}
@@ -71,10 +78,6 @@ TestType()
function TestPrototype() {
assertSame(Object.prototype, Symbol.prototype.__proto__)
assertSame(Symbol.prototype, Symbol().__proto__)
- assertSame(Symbol.prototype, Symbol(Symbol()).__proto__)
- assertSame(Symbol.prototype, (new Symbol).__proto__)
- assertSame(Symbol.prototype, (new Symbol()).__proto__)
- assertSame(Symbol.prototype, (new Symbol(Symbol())).__proto__)
assertSame(Symbol.prototype, Object(Symbol()).__proto__)
for (var i in symbols) {
assertSame(Symbol.prototype, symbols[i].__proto__)
@@ -84,14 +87,11 @@ TestPrototype()
function TestConstructor() {
+ assertSame(Function.prototype, Symbol.__proto__)
assertFalse(Object === Symbol.prototype.constructor)
assertFalse(Symbol === Object.prototype.constructor)
assertSame(Symbol, Symbol.prototype.constructor)
assertSame(Symbol, Symbol().__proto__.constructor)
- assertSame(Symbol, Symbol(Symbol()).__proto__.constructor)
- assertSame(Symbol, (new Symbol).__proto__.constructor)
- assertSame(Symbol, (new Symbol()).__proto__.constructor)
- assertSame(Symbol, (new Symbol(Symbol())).__proto__.constructor)
assertSame(Symbol, Object(Symbol()).__proto__.constructor)
for (var i in symbols) {
assertSame(Symbol, symbols[i].__proto__.constructor)
@@ -100,23 +100,26 @@ function TestConstructor() {
TestConstructor()
-function TestName() {
+function TestValueOf() {
for (var i in symbols) {
- var name = symbols[i].name
- assertTrue(name === undefined || name === "66")
+ assertTrue(symbols[i] === symbols[i].valueOf())
+ assertTrue(Symbol.prototype.valueOf.call(symbols[i]) === symbols[i])
}
}
-TestName()
+TestValueOf()
function TestToString() {
for (var i in symbols) {
assertThrows(function() { String(symbols[i]) }, TypeError)
assertThrows(function() { symbols[i] + "" }, TypeError)
- assertThrows(function() { symbols[i].toString() }, TypeError)
- assertThrows(function() { (new Symbol(symbols[i])).toString() }, TypeError)
- assertThrows(function() { Object(symbols[i]).toString() }, TypeError)
- assertEquals("[object Symbol]", Object.prototype.toString.call(symbols[i]))
+ assertTrue(isValidSymbolString(String(Object(symbols[i]))))
+ assertTrue(isValidSymbolString(symbols[i].toString()))
+ assertTrue(isValidSymbolString(Object(symbols[i]).toString()))
+ assertTrue(
+ isValidSymbolString(Symbol.prototype.toString.call(symbols[i])))
+ assertEquals(
+ "[object Symbol]", Object.prototype.toString.call(symbols[i]))
}
}
TestToString()
@@ -156,10 +159,16 @@ function TestEquality() {
assertTrue(Object.is(symbols[i], symbols[i]))
assertTrue(symbols[i] === symbols[i])
assertTrue(symbols[i] == symbols[i])
- assertFalse(symbols[i] === new Symbol(symbols[i]))
- assertFalse(new Symbol(symbols[i]) === symbols[i])
- assertTrue(symbols[i] == new Symbol(symbols[i]))
- assertTrue(new Symbol(symbols[i]) == symbols[i])
+ assertFalse(symbols[i] === Object(symbols[i]))
+ assertFalse(Object(symbols[i]) === symbols[i])
+ assertFalse(symbols[i] == Object(symbols[i]))
+ assertFalse(Object(symbols[i]) == symbols[i])
+ assertTrue(symbols[i] === symbols[i].valueOf())
+ assertTrue(symbols[i].valueOf() === symbols[i])
+ assertTrue(symbols[i] == symbols[i].valueOf())
+ assertTrue(symbols[i].valueOf() == symbols[i])
+ assertFalse(Object(symbols[i]) === Object(symbols[i]))
+ assertEquals(Object(symbols[i]).valueOf(), Object(symbols[i]).valueOf())
}
// All symbols should be distinct.
@@ -187,7 +196,7 @@ TestEquality()
function TestGet() {
for (var i in symbols) {
- assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertTrue(isValidSymbolString(symbols[i].toString()))
assertEquals(symbols[i], symbols[i].valueOf())
assertEquals(undefined, symbols[i].a)
assertEquals(undefined, symbols[i]["a" + "b"])
@@ -201,7 +210,7 @@ TestGet()
function TestSet() {
for (var i in symbols) {
symbols[i].toString = 0
- assertThrows(function() { symbols[i].toString() }, TypeError)
+ assertTrue(isValidSymbolString(symbols[i].toString()))
symbols[i].valueOf = 0
assertEquals(symbols[i], symbols[i].valueOf())
symbols[i].a = 0
@@ -215,6 +224,18 @@ function TestSet() {
TestSet()
+// Test Symbol wrapping/boxing over non-builtins.
+Symbol.prototype.getThisProto = function () {
+ return Object.getPrototypeOf(this);
+}
+function TestCall() {
+ for (var i in symbols) {
+ assertTrue(symbols[i].getThisProto() === Symbol.prototype)
+ }
+}
+TestCall()
+
+
function TestCollections() {
var set = new Set
var map = new Map
@@ -309,7 +330,7 @@ function TestGetOwnPropertySymbols(obj) {
function TestKeyDescriptor(obj) {
for (var i in symbols) {
- var desc = Object.getOwnPropertyDescriptor(obj, symbols[i]);
+ var desc = Object.getOwnPropertyDescriptor(obj, symbols[i])
assertEquals(i|0, desc.value)
assertTrue(desc.configurable)
assertEquals(i % 2 == 0, desc.writable)
@@ -389,15 +410,59 @@ function TestGetOwnPropertySymbolsWithProto() {
TestGetOwnPropertySymbolsWithProto()
-function TestGetOwnPropertySymbolsWithPrivateSymbols() {
- var privateSymbol = %CreatePrivateSymbol("private")
- var publicSymbol = Symbol()
- var publicSymbol2 = Symbol()
- var obj = {}
- obj[publicSymbol] = 1
- obj[privateSymbol] = 2
- obj[publicSymbol2] = 3
- var syms = Object.getOwnPropertySymbols(obj)
- assertEquals(syms, [publicSymbol, publicSymbol2])
+function TestWellKnown() {
+ var symbols = [
+ "create", "hasInstance", "isConcatSpreadable", "isRegExp",
+ "iterator", "toStringTag", "unscopables"
+ ]
+
+ for (var i in symbols) {
+ var name = symbols[i]
+ var desc = Object.getOwnPropertyDescriptor(Symbol, name)
+ assertSame("symbol", typeof desc.value)
+ assertSame("Symbol(Symbol." + name + ")", desc.value.toString())
+ assertFalse(desc.writable)
+ assertFalse(desc.configurable)
+ assertFalse(desc.enumerable)
+
+ assertFalse(Symbol.for("Symbol." + name) === desc.value)
+ assertTrue(Symbol.keyFor(desc.value) === undefined)
+ }
+}
+TestWellKnown()
+
+
+function TestRegistry() {
+ var symbol1 = Symbol.for("x1")
+ var symbol2 = Symbol.for("x2")
+ assertFalse(symbol1 === symbol2)
+
+ assertSame(symbol1, Symbol.for("x1"))
+ assertSame(symbol2, Symbol.for("x2"))
+ assertSame("x1", Symbol.keyFor(symbol1))
+ assertSame("x2", Symbol.keyFor(symbol2))
+
+ assertSame(Symbol.for("1"), Symbol.for(1))
+ assertThrows(function() { Symbol.keyFor("bla") }, TypeError)
+ assertThrows(function() { Symbol.keyFor({}) }, TypeError)
+
+ var realm = Realm.create()
+ assertFalse(Symbol === Realm.eval(realm, "Symbol"))
+ assertFalse(Symbol.for === Realm.eval(realm, "Symbol.for"))
+ assertFalse(Symbol.keyFor === Realm.eval(realm, "Symbol.keyFor"))
+ assertSame(Symbol.create, Realm.eval(realm, "Symbol.create"))
+ assertSame(Symbol.iterator, Realm.eval(realm, "Symbol.iterator"))
+
+ assertSame(symbol1, Realm.eval(realm, "Symbol.for")("x1"))
+ assertSame(symbol1, Realm.eval(realm, "Symbol.for('x1')"))
+ assertSame("x1", Realm.eval(realm, "Symbol.keyFor")(symbol1))
+ Realm.shared = symbol1
+ assertSame("x1", Realm.eval(realm, "Symbol.keyFor(Realm.shared)"))
+
+ var symbol3 = Realm.eval(realm, "Symbol.for('x3')")
+ assertFalse(symbol1 === symbol3)
+ assertFalse(symbol2 === symbol3)
+ assertSame(symbol3, Symbol.for("x3"))
+ assertSame("x3", Symbol.keyFor(symbol3))
}
-TestGetOwnPropertySymbolsWithPrivateSymbols()
+TestRegistry()
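
A minimal sketch of the registry contract the new TestRegistry cases above rely on (the key "app.id" is made up for illustration; print() is the d8 builtin already used in these tests):

var a = Symbol.for("app.id")
var b = Symbol.for("app.id")
print(a === b)                    // true: Symbol.for interns by key
print(Symbol.keyFor(a))           // "app.id"
print(Symbol.keyFor(Symbol()))    // undefined: plain Symbol() is not registered
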
diff --git a/deps/v8/test/mjsunit/invalid-lhs.js b/deps/v8/test/mjsunit/invalid-lhs.js
index ef63add77..92f5c6ff7 100644
--- a/deps/v8/test/mjsunit/invalid-lhs.js
+++ b/deps/v8/test/mjsunit/invalid-lhs.js
@@ -29,37 +29,37 @@
// exceptions are delayed until runtime.
// Normal assignments:
-assertThrows("12 = 12");
-assertThrows("x++ = 12");
-assertThrows("eval('var x') = 12");
-assertDoesNotThrow("if (false) eval('var x') = 12");
+assertThrows("12 = 12", ReferenceError);
+assertThrows("x++ = 12", ReferenceError);
+assertThrows("eval('var x') = 12", ReferenceError);
+assertThrows("if (false) eval('var x') = 12", ReferenceError);
// Pre- and post-fix operations:
-assertThrows("12++");
-assertThrows("12--");
-assertThrows("--12");
-assertThrows("++12");
-assertThrows("++(eval('12'))");
-assertThrows("(eval('12'))++");
-assertDoesNotThrow("if (false) ++(eval('12'))");
-assertDoesNotThrow("if (false) (eval('12'))++");
+assertThrows("12++", ReferenceError);
+assertThrows("12--", ReferenceError);
+assertThrows("--12", ReferenceError);
+assertThrows("++12", ReferenceError);
+assertThrows("++(eval('12'))", ReferenceError);
+assertThrows("(eval('12'))++", ReferenceError);
+assertThrows("if (false) ++(eval('12'))", ReferenceError);
+assertThrows("if (false) (eval('12'))++", ReferenceError);
// For in:
-assertThrows("for (12 in [1]) print(12);");
-assertThrows("for (eval('var x') in [1]) print(12);");
-assertDoesNotThrow("if (false) for (eval('var x') in [1]) print(12);");
+assertThrows("for (12 in [1]) print(12);", ReferenceError);
+assertThrows("for (eval('var x') in [1]) print(12);", ReferenceError);
+assertThrows("if (false) for (eval('0') in [1]) print(12);", ReferenceError);
// For:
-assertThrows("for (12 = 1;;) print(12);");
-assertThrows("for (eval('var x') = 1;;) print(12);");
-assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);");
+assertThrows("for (12 = 1;;) print(12);", ReferenceError);
+assertThrows("for (eval('var x') = 1;;) print(12);", ReferenceError);
+assertThrows("if (false) for (eval('var x') = 1;;) print(12);", ReferenceError);
// Assignments to 'this'.
-assertThrows("this = 42");
-assertDoesNotThrow("function f() { this = 12; }");
-assertThrows("for (this in {x:3, y:4, z:5}) ;");
-assertThrows("for (this = 0;;) ;");
-assertThrows("this++");
-assertThrows("++this");
-assertThrows("this--");
-assertThrows("--this");
+assertThrows("this = 42", ReferenceError);
+assertThrows("function f() { this = 12; }", ReferenceError);
+assertThrows("for (this in {x:3, y:4, z:5}) ;", ReferenceError);
+assertThrows("for (this = 0;;) ;", ReferenceError);
+assertThrows("this++", ReferenceError);
+assertThrows("++this", ReferenceError);
+assertThrows("this--", ReferenceError);
+assertThrows("--this", ReferenceError);
diff --git a/deps/v8/test/mjsunit/math-floor-of-div.js b/deps/v8/test/mjsunit/math-floor-of-div.js
index d528b8510..707f65714 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div.js
@@ -286,3 +286,14 @@ test_div_deopt_div_by_zero_v();
test_div_deopt_minus_zero_v();
test_div_deopt_overflow_v();
test_div_deopt_div_by_zero_v();
+
+
+// Test for flooring division with negative dividend.
+function flooring_div_by_3(y) {
+ return Math.floor(y / 3);
+}
+
+assertEquals(-1, flooring_div_by_3(-2));
+assertEquals(-1, flooring_div_by_3(-2));
+%OptimizeFunctionOnNextCall(flooring_div_by_3);
+assertEquals(-1, flooring_div_by_3(-2));
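
For context, a short worked example of the distinction the new test guards: truncating division rounds toward zero, while Math.floor rounds toward negative infinity, and the two differ exactly for negative dividends.

print((-2 / 3) | 0)          //  0: truncation toward zero
print(Math.floor(-2 / 3))    // -1: flooring, which the optimized code must preserve
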
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 9f2af491a..3283070c6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -70,15 +70,15 @@
##############################################################################
# These use a built-in that's only present in debug mode. They take
# too long to run in debug mode on ARM and MIPS.
- 'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == mipsel', SKIP]],
+ 'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', SKIP]],
- 'big-object-literal': [PASS, ['arch == arm or arch == android_arm', SKIP]],
+ 'big-object-literal': [PASS, ['arch == arm or arch == android_arm or arch == android_arm64', SKIP]],
# Issue 488: this test sometimes times out.
'array-constructor': [PASS, TIMEOUT],
# Very slow on ARM and MIPS, contains no architecture dependent code.
- 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == mipsel', TIMEOUT]],
+ 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', TIMEOUT]],
##############################################################################
# This test expects to reach a certain recursion depth, which may not work
@@ -93,8 +93,8 @@
# This test sets the umask on a per-process basis and hence cannot be
# used in multi-threaded runs.
# On android there is no /tmp directory.
- 'd8-os': [PASS, ['isolates or arch == android_arm or arch == android_ia32', SKIP]],
- 'tools/tickprocessor': [PASS, ['arch == android_arm or arch == android_ia32', SKIP]],
+ 'd8-os': [PASS, ['isolates or arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
+ 'tools/tickprocessor': [PASS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
##############################################################################
# Long running test that reproduces memory leak and should be run manually.
@@ -134,9 +134,97 @@
'osr-elements-kind': [SKIP],
'regress/regress-165637': [SKIP],
'regress/regress-2249': [SKIP],
+ 'debug-stepout-scope-part8': [PASS, ['arch == arm ', FAIL]],
}], # 'gc_stress == True'
##############################################################################
+['arch == arm64 or arch == android_arm64', {
+
+  # Requires a bigger stack size in Genesis, and if the stack size is
+  # increased, the test takes too much time to run. However, the problem the
+  # test covers should be platform-independent.
+ 'regress/regress-1132': [SKIP],
+
+ # Pass but take too long to run. Skip.
+ # Some similar tests (with fewer iterations) may be included in arm64-js
+ # tests.
+ 'compiler/regress-arguments': [SKIP],
+ 'compiler/regress-gvn': [SKIP],
+ 'compiler/regress-max-locals-for-osr': [SKIP],
+ 'compiler/regress-4': [SKIP],
+ 'compiler/regress-or': [SKIP],
+ 'compiler/regress-rep-change': [SKIP],
+ 'regress/regress-1117': [SKIP],
+ 'regress/regress-1145': [SKIP],
+ 'regress/regress-1849': [SKIP],
+ 'regress/regress-3247124': [SKIP],
+ 'regress/regress-634': [SKIP],
+ 'regress/regress-91008': [SKIP],
+ 'regress/regress-91010': [SKIP],
+ 'regress/regress-91013': [SKIP],
+ 'regress/regress-99167': [SKIP],
+
+ # Long running tests.
+ 'regress/regress-2185': [PASS, ['mode == debug', PASS, TIMEOUT]],
+ 'regress/regress-2185-2': [PASS, TIMEOUT],
+ 'whitespaces': [PASS, TIMEOUT, SLOW],
+
+  # Stack manipulations in LiveEdit are not implemented for this arch.
+ 'debug-liveedit-check-stack': [SKIP],
+ 'debug-liveedit-stack-padding': [SKIP],
+ 'debug-liveedit-restart-frame': [SKIP],
+ 'debug-liveedit-double-call': [SKIP],
+
+ # BUG(v8:3147). It works on other architectures by accident.
+ 'regress/regress-conditional-position': [FAIL],
+
+ # Slow tests.
+ 'array-concat': [PASS, SLOW],
+ 'array-constructor': [PASS, SLOW],
+ 'array-indexing': [PASS, SLOW],
+ 'array-reduce': [PASS, SLOW],
+ 'array-sort': [PASS, SLOW],
+ 'array-splice': [PASS, SLOW],
+ 'bit-not': [PASS, SLOW],
+ 'compiler/alloc-number': [PASS, SLOW],
+ 'compiler/osr-assert': [PASS, SLOW],
+ 'compiler/osr-warm': [PASS, SLOW],
+ 'compiler/osr-with-args': [PASS, SLOW],
+ 'debug-scopes': [PASS, SLOW],
+ 'generated-transition-stub': [PASS, SLOW],
+ 'json2': [PASS, SLOW],
+ 'math-floor-of-div-nosudiv': [PASS, SLOW],
+ 'math-floor-of-div': [PASS, SLOW],
+ 'mirror-object': [PASS, SLOW],
+ 'packed-elements': [PASS, SLOW],
+ 'regress/regress-1122': [PASS, SLOW],
+ 'regress/regress-2185-2': [PASS, SLOW],
+ 'regress/regress-2185': [PASS, SLOW],
+ 'regress/regress-2790': [PASS, SLOW],
+ 'regress/regress-331444': [PASS, SLOW],
+ 'regress/regress-490': [PASS, SLOW],
+ 'regress/regress-crbug-217858': [PASS, SLOW],
+ 'regress/regress-create-exception': [PASS, SLOW],
+ 'regress/regress-json-stringify-gc': [PASS, SLOW],
+ 'string-indexof-2': [PASS, SLOW],
+ 'unicodelctest-no-optimization': [PASS, SLOW],
+ 'unicodelctest': [PASS, SLOW],
+ 'unicode-test': [PASS, SLOW],
+}], # 'arch == arm64'
+
+['arch == arm64 and mode == debug and simulator_run == True', {
+
+ # Pass but take too long with the simulator in debug mode.
+ 'array-sort': [PASS, TIMEOUT],
+ 'packed-elements': [SKIP],
+ 'regexp-global': [SKIP],
+ 'compiler/alloc-numbers': [SKIP],
+ 'harmony/symbols': [SKIP],
+ # Issue 3219:
+ 'getters-on-elements': [PASS, ['gc_stress == True', FAIL]],
+}], # 'arch == arm64 and mode == debug and simulator_run == True'
+
+##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'big-array-literal': [SKIP],
@@ -157,7 +245,7 @@
'unicode-test': [PASS, ['mode == debug', PASS, FAIL]],
# Times out often in release mode on ARM.
- 'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+ 'compiler/regress-stacktrace-methods': [PASS, ['mode == release', TIMEOUT]],
'array-splice': [PASS, TIMEOUT],
# Long running test.
@@ -208,7 +296,7 @@
'array-constructor': [PASS, ['mode == debug', SKIP]],
# Times out often in release mode on MIPS.
- 'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+ 'compiler/regress-stacktrace-methods': [PASS, ['mode == release', TIMEOUT]],
'array-splice': [PASS, TIMEOUT],
# Long running test.
@@ -276,6 +364,9 @@
# Bug(v8:2978).
'lithium/MathExp': [PASS, FAIL],
+
+ # Lead to OOM:
+ 'string-oom-*': [SKIP],
}], # 'arch == nacl_ia32 or arch == nacl_x64'
##############################################################################
diff --git a/deps/v8/test/mjsunit/neuter-twice.js b/deps/v8/test/mjsunit/neuter-twice.js
new file mode 100644
index 000000000..3501cee43
--- /dev/null
+++ b/deps/v8/test/mjsunit/neuter-twice.js
@@ -0,0 +1,9 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var ab = new ArrayBuffer(100);
+%ArrayBufferNeuter(ab);
+%ArrayBufferNeuter(ab);
diff --git a/deps/v8/test/mjsunit/pixel-array-rounding.js b/deps/v8/test/mjsunit/pixel-array-rounding.js
index b7db51c2c..b7db51c2c 100755..100644
--- a/deps/v8/test/mjsunit/pixel-array-rounding.js
+++ b/deps/v8/test/mjsunit/pixel-array-rounding.js
diff --git a/deps/v8/test/mjsunit/proto-accessor.js b/deps/v8/test/mjsunit/proto-accessor.js
index aca6ec542..b2e7d3466 100644
--- a/deps/v8/test/mjsunit/proto-accessor.js
+++ b/deps/v8/test/mjsunit/proto-accessor.js
@@ -25,57 +25,123 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-symbols
+
+// Fake Symbol if undefined, allowing test to run in non-Harmony mode as well.
+this.Symbol = typeof Symbol != 'undefined' ? Symbol : String;
+
+
var desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertEquals("function", typeof desc.get);
-assertEquals("function", typeof desc.set);
-assertDoesNotThrow("desc.get.call({})");
-assertDoesNotThrow("desc.set.call({}, {})");
+var getProto = desc.get;
+var setProto = desc.set;
+
+function TestNoPoisonPill() {
+ assertEquals("function", typeof desc.get);
+ assertEquals("function", typeof desc.set);
+ assertDoesNotThrow("desc.get.call({})");
+ assertDoesNotThrow("desc.set.call({}, {})");
+
+ var obj = {};
+ var obj2 = {};
+ desc.set.call(obj, obj2);
+ assertEquals(obj.__proto__, obj2);
+ assertEquals(desc.get.call(obj), obj2);
+}
+TestNoPoisonPill();
+
+
+function TestRedefineObjectPrototypeProtoGetter() {
+ Object.defineProperty(Object.prototype, "__proto__", {
+ get: function() {
+ return 42;
+ }
+ });
+ assertEquals({}.__proto__, 42);
+ assertEquals(desc.get.call({}), Object.prototype);
+
+ var desc2 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+ assertEquals(desc2.get.call({}), 42);
+ assertEquals(desc2.set.call({}), undefined);
+
+ Object.defineProperty(Object.prototype, "__proto__", {
+ set: function(x) {}
+ });
+ var desc3 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+ assertEquals(desc3.get.call({}), 42);
+ assertEquals(desc3.set.call({}), undefined);
+}
+TestRedefineObjectPrototypeProtoGetter();
-var obj = {};
-var obj2 = {};
-desc.set.call(obj, obj2);
-assertEquals(obj.__proto__, obj2);
-assertEquals(desc.get.call(obj), obj2);
+function TestRedefineObjectPrototypeProtoSetter() {
+ Object.defineProperty(Object.prototype, "__proto__", { set: undefined });
+ assertThrows(function() {
+ "use strict";
+ var o = {};
+ var p = {};
+ o.__proto__ = p;
+ }, TypeError);
+}
+TestRedefineObjectPrototypeProtoSetter();
-// Check that any redefinition of the __proto__ accessor works.
-Object.defineProperty(Object.prototype, "__proto__", {
- get: function() {
- return 42;
+function TestGetProtoOfValues() {
+ assertEquals(getProto.call(1), Number.prototype);
+ assertEquals(getProto.call(true), Boolean.prototype);
+ assertEquals(getProto.call(false), Boolean.prototype);
+ assertEquals(getProto.call('s'), String.prototype);
+ assertEquals(getProto.call(Symbol()), Symbol.prototype);
+
+ assertThrows(function() { getProto.call(null); }, TypeError);
+ assertThrows(function() { getProto.call(undefined); }, TypeError);
+}
+TestGetProtoOfValues();
+
+
+var values = [1, true, false, 's', Symbol()];
+
+
+function TestSetProtoOfValues() {
+ var proto = {};
+ for (var i = 0; i < values.length; i++) {
+ assertEquals(setProto.call(values[i], proto), undefined);
}
-});
-assertEquals({}.__proto__, 42);
-assertEquals(desc.get.call({}), Object.prototype);
+
+ assertThrows(function() { setProto.call(null, proto); }, TypeError);
+ assertThrows(function() { setProto.call(undefined, proto); }, TypeError);
+}
+TestSetProtoOfValues();
-var desc2 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertEquals(desc2.get.call({}), 42);
-assertDoesNotThrow("desc2.set.call({})");
+function TestSetProtoToValue() {
+ var object = {};
+ var proto = {};
+ setProto.call(object, proto);
+ var valuesWithUndefined = values.concat(undefined);
+
+ for (var i = 0; i < valuesWithUndefined.length; i++) {
+ assertEquals(setProto.call(object, valuesWithUndefined[i]), undefined);
+ assertEquals(getProto.call(object), proto);
+ }
-Object.defineProperty(Object.prototype, "__proto__", { set:function(x){} });
-var desc3 = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
-assertDoesNotThrow("desc3.get.call({})");
-assertDoesNotThrow("desc3.set.call({})");
+  // Among these values, null is the only one that is valid as a [[Prototype]].
+ assertEquals(setProto.call(object, null), undefined);
+ assertEquals(getProto.call(object), null);
+}
+TestSetProtoToValue();
-Object.defineProperty(Object.prototype, "__proto__", { set: undefined });
-assertThrows(function() {
- "use strict";
+function TestDeleteProto() {
+ assertTrue(delete Object.prototype.__proto__);
var o = {};
var p = {};
o.__proto__ = p;
-}, TypeError);
-
-
-assertTrue(delete Object.prototype.__proto__);
-var o = {};
-var p = {};
-o.__proto__ = p;
-assertEquals(Object.getPrototypeOf(o), Object.prototype);
-var desc4 = Object.getOwnPropertyDescriptor(o, "__proto__");
-assertTrue(desc4.configurable);
-assertTrue(desc4.enumerable);
-assertTrue(desc4.writable);
-assertEquals(desc4.value, p);
+ assertEquals(Object.getPrototypeOf(o), Object.prototype);
+ var desc4 = Object.getOwnPropertyDescriptor(o, "__proto__");
+ assertTrue(desc4.configurable);
+ assertTrue(desc4.enumerable);
+ assertTrue(desc4.writable);
+ assertEquals(desc4.value, p);
+}
+TestDeleteProto();
diff --git a/deps/v8/test/mjsunit/readonly.js b/deps/v8/test/mjsunit/readonly.js
index 4d06b7cf4..050e25627 100644
--- a/deps/v8/test/mjsunit/readonly.js
+++ b/deps/v8/test/mjsunit/readonly.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-proxies --es5_readonly
+// Flags: --allow-natives-syntax --es5_readonly
+// Flags: --harmony-proxies
// Different ways to create an object.
@@ -120,8 +121,12 @@ function ReadonlyByProto(o, name) {
o.__proto__ = p;
}
+// Allow Proxy to be undefined, so test can run in non-Harmony mode as well.
+var global = this;
+
function ReadonlyByProxy(o, name) {
- var p = Proxy.create({
+ if (!global.Proxy) return ReadonlyByFreeze(o, name); // Dummy.
+ var p = global.Proxy.create({
getPropertyDescriptor: function() {
return {value: -46, writable: false, configurable: true};
}
diff --git a/deps/v8/test/mjsunit/regexp-capture-3.js b/deps/v8/test/mjsunit/regexp-capture-3.js
index 4c27ea454..4c27ea454 100755..100644
--- a/deps/v8/test/mjsunit/regexp-capture-3.js
+++ b/deps/v8/test/mjsunit/regexp-capture-3.js
diff --git a/deps/v8/test/mjsunit/regexp-capture.js b/deps/v8/test/mjsunit/regexp-capture.js
index 307309482..307309482 100755..100644
--- a/deps/v8/test/mjsunit/regexp-capture.js
+++ b/deps/v8/test/mjsunit/regexp-capture.js
diff --git a/deps/v8/test/mjsunit/regress-3225.js b/deps/v8/test/mjsunit/regress-3225.js
new file mode 100644
index 000000000..357f94b24
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-3225.js
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-generators
+
+Debug = debug.Debug
+
+var debug_step = 0;
+var failure = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ if (debug_step == 0) {
+ assertEquals(1, exec_state.frame(0).evaluate('a').value());
+ assertEquals(3, exec_state.frame(0).evaluate('b').value());
+ exec_state.frame(0).evaluate("a = 4").value();
+ debug_step++;
+ } else {
+ assertEquals(4, exec_state.frame(0).evaluate('a').value());
+ assertEquals(3, exec_state.frame(0).evaluate('b').value());
+ exec_state.frame(0).evaluate("b = 5").value();
+ }
+ } catch (e) {
+ failure = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function* generator(a, b) {
+ var b = 3; // Shadows a parameter.
+ debugger;
+ yield a;
+ yield b;
+ debugger;
+ return b;
+}
+
+var foo = generator(1, 2);
+
+assertEquals(4, foo.next().value);
+assertEquals(3, foo.next().value);
+assertEquals(5, foo.next().value);
+assertNull(failure);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress-keyed-store-non-strict-arguments.js b/deps/v8/test/mjsunit/regress-keyed-store-non-strict-arguments.js
new file mode 100644
index 000000000..865d600ad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-keyed-store-non-strict-arguments.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function args(arg) { return arguments; }
+var a = args(false);
+
+(function () {
+ "use strict";
+ a["const" + 0] = 0;
+})();
+
+(function () {
+ "use strict";
+ a[0] = 0;
+})();
diff --git a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
new file mode 100644
index 000000000..f07c12b2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --block-concurrent-recompilation
+// Flags: --no-concurrent-osr
+
+function Ctor() {
+ this.a = 1;
+}
+
+function get_closure() {
+ return function add_field(obj, osr) {
+ obj.c = 3;
+ var x = 0;
+ if (osr) {
+ %OptimizeFunctionOnNextCall(add_field, "osr");
+ }
+ for (var i = 0; i < 10; i++) {
+ x = i + 1;
+ }
+ return x;
+ }
+}
+
+var f1 = get_closure();
+f1(new Ctor(), false);
+f1(new Ctor(), false);
+
+%OptimizeFunctionOnNextCall(f1, "concurrent");
+
+// Kick off concurrent recompilation and OSR.
+var o = new Ctor();
+f1(o, true);
+assertOptimized(f1, "no sync");
+
+// Flush the optimizing compiler's queue.
+%NotifyContextDisposed();
+assertUnoptimized(f1, "no sync");
+
+// Trigger deopt.
+o.c = 2.2;
+
+var f2 = get_closure();
+f2(new Ctor(), true);
diff --git a/deps/v8/test/mjsunit/regress/compare-map-elim1.js b/deps/v8/test/mjsunit/regress/compare-map-elim1.js
new file mode 100644
index 000000000..c7ea05def
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/compare-map-elim1.js
@@ -0,0 +1,57 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --check-elimination
+
+
+function foo(o) {
+ return o.foo1;
+}
+
+function getter() {
+ return this.x + this.z + foo2(this);
+}
+
+function foo2(o) {
+ return o.a;
+}
+
+var o1 = {z:0, x:1};
+var o2 = {z:0, a:1.5, x:1};
+var o3 = {z:0, a:1.5};
+Object.defineProperty(o1, "foo1", {get:getter});
+Object.defineProperty(o2, "foo1", {get:getter});
+
+foo(o1);
+foo(o1);
+foo(o2);
+%ClearFunctionTypeFeedback(foo2);
+foo2(o2);
+foo2(o2);
+foo2(o3);
+%OptimizeFunctionOnNextCall(foo);
+foo(o1);
diff --git a/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
new file mode 100644
index 000000000..b28dff73a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
@@ -0,0 +1,47 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function lazyDeopt() {
+ %DeoptimizeFunction(test);
+ return "deopt";
+}
+
+var x = { toString : lazyDeopt };
+
+function g(x) {
+ return "result";
+}
+
+function test(x) {
+ return g(void(x == ""));
+}
+
+test(x);
+%OptimizeFunctionOnNextCall(test);
+assertEquals("result", test(x));
diff --git a/deps/v8/test/mjsunit/regress/number-named-call-deopt.js b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
new file mode 100644
index 000000000..1598af12b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(x, deopt, osr) {
+ var res = "result";
+ void(x.toString(10, deopt + 0));
+ if (osr) for (var i = 0; i < 100000; i++) { }
+ return res;
+}
+
+f(4, 0, false);
+f(4, 0, false);
+f(4, 0, false);
+%OptimizeFunctionOnNextCall(f);
+assertEquals("result", f(4, "deopt", true));
diff --git a/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
new file mode 100644
index 000000000..618827924
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function t1() { return this instanceof t1; }
+function t2() { return this instanceof t2; }
+
+var o1 = new (function() { })();
+Object.defineProperty(o1, "t", {get:function() { return this instanceof o1.constructor; }});
+var o2 = new (function() { })();
+Object.defineProperty(o2, "t", {get:function() { return this instanceof o1.constructor; }});
+var o3 = new (function() { })();
+o3.t = true;
+
+function f(o) {
+ return 1 + (o.t ? 1 : 2);
+}
+
+f(o1);
+f(o1);
+f(o2);
+%OptimizeFunctionOnNextCall(f);
+f(o3);
diff --git a/deps/v8/test/mjsunit/regress/regress-2273.js b/deps/v8/test/mjsunit/regress/regress-2273.js
index 7868b8da2..76b5ab6aa 100644
--- a/deps/v8/test/mjsunit/regress/regress-2273.js
+++ b/deps/v8/test/mjsunit/regress/regress-2273.js
@@ -79,7 +79,7 @@ function strict_mode() {
};
strict_mode();
-function classic_mode() {
+function sloppy_mode() {
CheckStringReceiver.call("foo");
CheckNumberReceiver.call(42);
CheckUndefinedReceiver.call(undefined);
@@ -100,4 +100,4 @@ function classic_mode() {
[4].some(CheckCoersion, 42);
[5].map(CheckCoersion, 42);
};
-classic_mode();
+sloppy_mode();
diff --git a/deps/v8/test/mjsunit/regress/regress-2318.js b/deps/v8/test/mjsunit/regress/regress-2318.js
index 5fa8a4f96..e31e0f904 100644
--- a/deps/v8/test/mjsunit/regress/regress-2318.js
+++ b/deps/v8/test/mjsunit/regress/regress-2318.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=100
+// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=150
function f() {
var i = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-2564.js b/deps/v8/test/mjsunit/regress/regress-2564.js
index 1d7cdaeae..21b40e0b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-2564.js
+++ b/deps/v8/test/mjsunit/regress/regress-2564.js
@@ -66,7 +66,7 @@ Error.prepareStackTrace = function(error, frames) {
try {
assertEquals(5, frames.length);
for (var i = 0; i < 2; i++) {
- // The first two frames are still classic mode.
+ // The first two frames are still sloppy mode.
assertEquals(o[i], frames[i].getFunction());
assertEquals(o, frames[i].getThis());
}
diff --git a/deps/v8/test/mjsunit/regress/regress-3032.js b/deps/v8/test/mjsunit/regress/regress-3032.js
index ae5454375..ae5454375 100755..100644
--- a/deps/v8/test/mjsunit/regress/regress-3032.js
+++ b/deps/v8/test/mjsunit/regress/regress-3032.js
diff --git a/deps/v8/test/mjsunit/regress/regress-3135.js b/deps/v8/test/mjsunit/regress/regress-3135.js
new file mode 100644
index 000000000..f15c9a86d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3135.js
@@ -0,0 +1,73 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Properties are serialized once.
+assertEquals('{"x":1}', JSON.stringify({ x : 1 }, ["x", 1, "x", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, ["x", 1, "x", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, ["1", 1, "1", 1]));
+assertEquals('{"1":1}', JSON.stringify({ 1 : 1 }, [1, "1", 1, "1"]));
+
+// Properties are visited at most once.
+var fired = 0;
+var getter_obj = { get x() { fired++; return 2; } };
+assertEquals('{"x":2}', JSON.stringify(getter_obj, ["x", "y", "x"]));
+assertEquals(1, fired);
+
+// Order of the replacer array is followed.
+assertEquals('{"y":4,"x":3}', JSON.stringify({ x : 3, y : 4}, ["y", "x"]));
+assertEquals('{"y":4,"1":2,"x":3}',
+ JSON.stringify({ x : 3, y : 4, 1 : 2 }, ["y", 1, "x"]));
+
+// With a replacer array, the value of the property is retrieved using [[Get]],
+// regardless of whether the property is own or enumerable.
+var a = { x : 8 };
+assertEquals('{"__proto__":{"__proto__":null},"x":8}',
+ JSON.stringify(a, ["__proto__", "x", "__proto__"]));
+a.__proto__ = { x : 7 };
+assertEquals('{"__proto__":{"__proto__":{"__proto__":null},"x":7},"x":8}',
+ JSON.stringify(a, ["__proto__", "x"]));
+var b = { __proto__: { x: 9 } };
+assertEquals('{}', JSON.stringify(b));
+assertEquals('{"x":9}', JSON.stringify(b, ["x"]));
+var c = {x: 10};
+Object.defineProperty(c, 'x', { enumerable: false });
+assertEquals('{}', JSON.stringify(c));
+assertEquals('{"x":10}', JSON.stringify(c, ["x"]));
+
+// Arrays are not affected by the replacer array.
+assertEquals("[9,8,7]", JSON.stringify([9, 8, 7], [1, 1]));
+var mixed_arr = [11,12,13];
+mixed_arr.x = 10;
+assertEquals('[11,12,13]', JSON.stringify(mixed_arr, [1, 0, 1]));
+
+// Array elements of objects are affected.
+var mixed_obj = { x : 3 };
+mixed_obj[0] = 6;
+mixed_obj[1] = 5;
+assertEquals('{"1":5,"0":6}', JSON.stringify(mixed_obj, [1, 0, 1]));
+
+// Nested object.
+assertEquals('{"z":{"x":3},"x":1}',
+ JSON.stringify({ x: 1, y:2, z: {x:3, b:4}}, ["z","x"]));
+
+// Objects in the replacer array are ignored.
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 }, [{}]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 }, [true, undefined, null]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 },
+ [{ toString: function() { return "x";} }]));
+assertEquals('{}',
+ JSON.stringify({ x : 1, "1": 1 },
+ [{ valueOf: function() { return 1;} }]));
+
+// Make sure that property names that clash with the names of Object.prototype
+// still work.
+assertEquals('{"toString":42}', JSON.stringify({ toString: 42 }, ["toString"]));
+
+// Number wrappers and String wrappers should be unwrapped.
+assertEquals('{"1":1,"s":"s"}',
+ JSON.stringify({ 1: 1, s: "s" },
+ [new Number(1), new String("s")]));
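
A compact sketch of the replacer-array contract the assertions above cover: only listed property names are kept, in replacer order and at most once, while array values ignore the list entirely.

print(JSON.stringify({x: 3, y: 4, z: 5}, ["y", "x", "y"]))  // {"y":4,"x":3}
print(JSON.stringify([9, 8, 7], ["0"]))                     // [9,8,7]
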
diff --git a/deps/v8/test/mjsunit/regress/regress-3138.js b/deps/v8/test/mjsunit/regress/regress-3138.js
new file mode 100644
index 000000000..acb121d2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3138.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function f(){
+ assertEquals("function", typeof f);
+})();
+
+(function f(){
+ var f; // Variable shadows function name.
+ assertEquals("undefined", typeof f);
+})();
+
+(function f(){
+ var f;
+ assertEquals("undefined", typeof f);
+ with ({}); // Force context allocation of both variable and function name.
+})();
+
+assertEquals("undefined", typeof f);
+
+// var initialization is intercepted by with scope.
+(function() {
+ var o = { a: 1 };
+ with (o) {
+ var a = 2;
+ }
+ assertEquals("undefined", typeof a);
+ assertEquals(2, o.a);
+})();
+
+// const initialization is not intercepted by with scope.
+(function() {
+ var o = { a: 1 };
+ with (o) {
+ const a = 2;
+ }
+ assertEquals(2, a);
+ assertEquals(1, o.a);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-3158.js b/deps/v8/test/mjsunit/regress/regress-3158.js
new file mode 100644
index 000000000..c69127395
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3158.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+Array.prototype[0] = 'a';
+delete Array.prototype[0];
+
+function foo(a, i) {
+ return a[i];
+}
+
+var a = new Array(100000);
+a[3] = 'x';
+
+foo(a, 3);
+foo(a, 3);
+foo(a, 3);
+%OptimizeFunctionOnNextCall(foo);
+foo(a, 3);
+Array.prototype[0] = 'a';
+var z = foo(a, 0);
+assertEquals('a', z);
diff --git a/deps/v8/test/mjsunit/regress/regress-3159.js b/deps/v8/test/mjsunit/regress/regress-3159.js
new file mode 100644
index 000000000..cfc8a39b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3159.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+ new Uint32Array(new ArrayBuffer(1), 2, 3);
+} catch (e) {
+ assertEquals("start offset of Uint32Array should be a multiple of 4",
+ e.message);
+}
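
The expected message reflects the alignment rule for typed-array views: byteOffset must be a multiple of the element size (Uint32Array.BYTES_PER_ELEMENT is 4) and the view must fit inside the buffer. A minimal sketch of a well-formed view, for contrast:

var buf = new ArrayBuffer(16)
var view = new Uint32Array(buf, 4, 3)  // offset 4 is 4-byte aligned; 4 + 3*4 <= 16
print(view.length)                     // 3
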
diff --git a/deps/v8/test/mjsunit/regress/regress-3183.js b/deps/v8/test/mjsunit/regress/regress-3183.js
new file mode 100644
index 000000000..0c915b0ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3183.js
@@ -0,0 +1,96 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function DeoptimizeArgCallFunctionGeneric() {
+ var a = [];
+
+ function f1(method, array, elem, deopt) {
+ assertEquals('push', method);
+ }
+
+ function f2() { }
+
+ function bar(x, deopt, f) {
+ f('push', a, [x], deopt + 0);
+ }
+
+ function foo() { return bar(arguments[0], arguments[1], arguments[2]); }
+ function baz(f, deopt) { return foo("x", deopt, f); }
+
+ baz(f1, 0);
+ baz(f2, 0);
+ %OptimizeFunctionOnNextCall(baz);
+ baz(f1, "deopt");
+})();
+
+
+(function DeoptimizeArgGlobalFunctionGeneric() {
+ var a = [];
+
+ var f1;
+
+ f1 = function(method, array, elem, deopt) {
+ assertEquals('push', method);
+ }
+
+ function bar(x, deopt, f) {
+ f1('push', a, [x], deopt + 0);
+ }
+
+ function foo() { return bar(arguments[0], arguments[1]); }
+ function baz(deopt) { return foo("x", deopt); }
+
+ baz(0);
+ baz(0);
+ %OptimizeFunctionOnNextCall(baz);
+ baz("deopt");
+})();
+
+
+(function DeoptimizeArgCallFunctionRuntime() {
+ var a = [];
+
+ var f1;
+
+ f1 = function(method, array, elem, deopt) {
+ assertEquals('push', method);
+ }
+
+ function bar(x, deopt) {
+ %_CallFunction(null, 'push', [x][0], ((deopt + 0), 1), f1);
+ }
+
+ function foo() { return bar(arguments[0], arguments[1]); }
+ function baz(deopt) { return foo(0, deopt); }
+
+ baz(0);
+ baz(0);
+ %OptimizeFunctionOnNextCall(baz);
+ baz("deopt");
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
index 4a48a61ab..9a24fc5c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
+++ b/deps/v8/test/mjsunit/regress/regress-319722-ArrayBuffer.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nostress-opt --allow-natives-syntax --mock-arraybuffer-allocator
-var maxSize = %MaxSmi() + 1;
+var maxSize = %_MaxSmi() + 1;
var ab;
// Allocate the largest ArrayBuffer we can on this architecture.
diff --git a/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js b/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
index 0445e2d2c..e497aecbe 100644
--- a/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
+++ b/deps/v8/test/mjsunit/regress/regress-319722-TypedArrays.js
@@ -27,7 +27,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nostress-opt --allow-natives-syntax
-var maxSize = %MaxSmi() + 1;
+var maxSize = %_MaxSmi() + 1;
function TestArray(constr) {
assertThrows(function() {
new constr(maxSize);
diff --git a/deps/v8/test/mjsunit/regress/regress-3204.js b/deps/v8/test/mjsunit/regress/regress-3204.js
new file mode 100644
index 000000000..dc754ff2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3204.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function ModILeftCanBeNegative() {
+ var x = 0;
+ for (var i = -1; i < 0; ++i) x = i % 2;
+ return x;
+}
+
+ModILeftCanBeNegative();
+%OptimizeFunctionOnNextCall(ModILeftCanBeNegative);
+assertEquals(-1, ModILeftCanBeNegative());
+
+function ModIRightCanBeZero() {
+ var x = 0;
+ for (var i = -1; i <= 0; ++i) x = (2 % i) | 0;
+ return x;
+}
+
+ModIRightCanBeZero();
+%OptimizeFunctionOnNextCall(ModIRightCanBeZero);
+ModIRightCanBeZero();
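
A short sketch of the JavaScript modulo semantics the optimized code must preserve in these two cases: the result's sign follows the dividend, and a zero divisor yields NaN (which | 0 converts to 0).

print(-1 % 2)       // -1: sign follows the dividend
print((2 % 0) | 0)  //  0: 2 % 0 is NaN, and NaN | 0 is 0
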
diff --git a/deps/v8/test/mjsunit/regress/regress-3220.js b/deps/v8/test/mjsunit/regress/regress-3220.js
new file mode 100644
index 000000000..6f8e8c8f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3220.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --use-strict
+
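+// Only checks that converting a Date to a string under --use-strict does not
+// crash or throw.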
+String(new Date());
diff --git a/deps/v8/test/mjsunit/regress-330046.js b/deps/v8/test/mjsunit/regress/regress-330046.js
index d94b804ac..d94b804ac 100644
--- a/deps/v8/test/mjsunit/regress-330046.js
+++ b/deps/v8/test/mjsunit/regress/regress-330046.js
diff --git a/deps/v8/test/mjsunit/regress-333594.js b/deps/v8/test/mjsunit/regress/regress-333594.js
index 6f6dbaafc..6f6dbaafc 100644
--- a/deps/v8/test/mjsunit/regress-333594.js
+++ b/deps/v8/test/mjsunit/regress/regress-333594.js
diff --git a/deps/v8/test/mjsunit/regress/regress-343609.js b/deps/v8/test/mjsunit/regress/regress-343609.js
new file mode 100644
index 000000000..5205ca133
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-343609.js
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --block-concurrent-recompilation
+// Flags: --no-concurrent-osr --expose-gc
+
+function Ctor() {
+ this.a = 1;
+}
+
+function get_closure() {
+ return function add_field(obj) {
+ obj.c = 3;
+ obj.a = obj.a + obj.c;
+ return obj.a;
+ }
+}
+function get_closure2() {
+ return function cc(obj) {
+ obj.c = 3;
+ obj.a = obj.a + obj.c;
+ }
+}
+
+function dummy() {
+ (function () {
+ var o = {c: 10};
+ var f1 = get_closure2();
+ f1(o);
+ f1(o);
+ %OptimizeFunctionOnNextCall(f1);
+ f1(o);
+ })();
+}
+
+var o = new Ctor();
+function opt() {
+ (function () {
+ var f1 = get_closure();
+ f1(new Ctor());
+ f1(new Ctor());
+ %OptimizeFunctionOnNextCall(f1);
+ f1(o);
+ })();
+}
+
+// Optimize add_field and install its code in optimized code cache.
+opt();
+opt();
+opt();
+
+// Optimize dummy function to remove the add_field from head of optimized
+// function list in the context.
+dummy();
+dummy();
+
+// Kill add_field in new space GC.
+for(var i = 0; i < 3; i++) gc(true);
+
+// Trigger deopt.
+o.c = 2.2;
+
+// Fetch optimized code of add_field from cache and crash.
+var f2 = get_closure();
+f2(new Ctor());
diff --git a/deps/v8/test/mjsunit/regress/regress-346587.js b/deps/v8/test/mjsunit/regress/regress-346587.js
new file mode 100644
index 000000000..40e3ac116
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-346587.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --fold-constants --allow-natives-syntax
+
+function bar(obj) {
+ assertTrue(obj.x === 'baz');
+}
+
+function foo() {
+ bar({ x : 'baz' });
+}
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-347530.js b/deps/v8/test/mjsunit/regress/regress-347530.js
new file mode 100644
index 000000000..330fda38c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-347530.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
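+// Shrinking the array to length 0 and forcing GC can trim its backing store;
+// the later store at index 1000 must still be observed (checked by the
+// assert below).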
+a = [];
+a[1000] = .1;
+a.length = 0;
+gc();
+gc();
+a[1000] = .1;
+assertEquals(.1, a[1000]);
diff --git a/deps/v8/test/mjsunit/regress/regress-347542.js b/deps/v8/test/mjsunit/regress/regress-347542.js
new file mode 100644
index 000000000..901d798fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-347542.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
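+// foo is optimized first; calling %NeverOptimizeFunction on an
+// already-optimized function must not crash.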
+function foo() {}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+%NeverOptimizeFunction(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-347906.js b/deps/v8/test/mjsunit/regress/regress-347906.js
new file mode 100644
index 000000000..c75161892
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-347906.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony
+
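+// Math.clz32 truncates its argument to a uint32, so clz32(12.34) equals
+// clz32(12) == 28; the optimized call must apply the same truncation.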
+function foo() {
+ return Math.clz32(12.34);
+}
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-347912.js b/deps/v8/test/mjsunit/regress/regress-347912.js
new file mode 100644
index 000000000..b609e36c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-347912.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var __v_4 = {};
+__v_2 = {};
+__v_2[1024] = 0;
+%DebugPrint(__v_4);
diff --git a/deps/v8/test/mjsunit/regress/regress-347914.js b/deps/v8/test/mjsunit/regress/regress-347914.js
new file mode 100644
index 000000000..bc4dcd7f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-347914.js
@@ -0,0 +1,89 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --debug-code --gc-interval=201 --verify-heap --max-inlined-source-size=999999 --max-inlined-nodes=999999 --max-inlined-nodes-cumulative=999999
+
+// Begin stripped down and modified version of mjsunit.js for easy minimization in CF.
+function MjsUnitAssertionError(message) {}
+MjsUnitAssertionError.prototype.toString = function () { return this.message; };
+var assertSame;
+var assertEquals;
+var assertEqualsDelta;
+var assertArrayEquals;
+var assertPropertiesEqual;
+var assertToStringEquals;
+var assertTrue;
+var assertFalse;
+var triggerAssertFalse;
+var assertNull;
+var assertNotNull;
+var assertThrows;
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+function classOf(object) { var string = Object.prototype.toString.call(object); return string.substring(8, string.length - 1); }
+function PrettyPrint(value) { return ""; }
+function PrettyPrintArrayElement(value, index, array) { return ""; }
+function fail(expectedText, found, name_opt) { }
+function deepObjectEquals(a, b) { var aProps = Object.keys(a); aProps.sort(); var bProps = Object.keys(b); bProps.sort(); if (!deepEquals(aProps, bProps)) { return false; } for (var i = 0; i < aProps.length; i++) { if (!deepEquals(a[aProps[i]], b[aProps[i]])) { return false; } } return true; }
+function deepEquals(a, b) { if (a === b) { if (a === 0) return (1 / a) === (1 / b); return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(a) && isNaN(b); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(a); if (objectClass !== classOf(b)) return false; if (objectClass === "RegExp") { return (a.toString() === b.toString()); } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (!deepEquals(a[i], b[i])) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf() !== b.valueOf()) return false; } return deepObjectEquals(a, b); }
+assertSame = function assertSame(expected, found, name_opt) { if (found === expected) { if (expected !== 0 || (1 / expected) == (1 / found)) return; } else if ((expected !== expected) && (found !== found)) { return; } fail(PrettyPrint(expected), found, name_opt); }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected), found, name_opt); } };
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) { assertTrue(Math.abs(expected - found) <= delta, name_opt); }; assertArrayEquals = function assertArrayEquals(expected, found, name_opt) { var start = ""; if (name_opt) { start = name_opt + " - "; } assertEquals(expected.length, found.length, start + "array length"); if (expected.length == found.length) { for (var i = 0; i < expected.length; ++i) { assertEquals(expected[i], found[i], start + "array element at index " + i); } } };
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) { if (!deepObjectEquals(expected, found)) { fail(expected, found, name_opt); } };
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) { if (expected != String(found)) { fail(expected, found, name_opt); } };
+assertTrue = function assertTrue(value, name_opt) { assertEquals(true, value, name_opt); };
+assertFalse = function assertFalse(value, name_opt) { assertEquals(false, value, name_opt); };
+
+assertNull = function assertNull(value, name_opt) { if (value !== null) { fail("null", value, name_opt); } };
+assertNotNull = function assertNotNull(value, name_opt) { if (value === null) { fail("not null", value, name_opt); } };
+as1sertThrows = function assertThrows(code, type_opt, cause_opt) { var threwException = true; try { if (typeof code == 'function') { code(); } else { eval(code); } threwException = false; } catch (e) { if (typeof type_opt == 'function') { assertInstanceof(e, type_opt); } if (arguments.length >= 3) { assertEquals(e.type, cause_opt); } return; } };
+assertInstanceof = function assertInstanceof(obj, type) { if (!(obj instanceof type)) { var actualTypeName = null; var actualConstructor = Object.getPrototypeOf(obj).constructor; if (typeof actualConstructor == "function") { actualTypeName = actualConstructor.name || String(actualConstructor); } fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" + (type.name || type) + ">" + (actualTypeName ? " but of < " + actualTypeName + ">" : "")); } };
+assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) { try { if (typeof code == 'function') { code(); } else { eval(code); } } catch (e) { fail("threw an exception: ", e.message || e, name_opt); } };
+assertUnreachable = function assertUnreachable(name_opt) { var message = "Fail" + "ure: unreachable"; if (name_opt) { message += " - " + name_opt; } };
+var OptimizationStatus;
+try { OptimizationStatus = new Function("fun", "sync", "return %GetOptimizationStatus(fun, sync);"); } catch (e) { OptimizationStatus = function() { } }
+assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt); }
+assertOptimized = function assertOptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt); }
+triggerAssertFalse = function() { }
+// End stripped down and modified version of mjsunit.js.
+
+var __v_1 = {};
+var __v_2 = {};
+var __v_3 = {};
+var __v_4 = {};
+var __v_5 = {};
+var __v_6 = {};
+var __v_7 = {};
+var __v_8 = {};
+var __v_9 = {};
+var __v_10 = {};
+var __v_0 = 'fisk';
+assertEquals('fisk', __v_0);
+var __v_0;
+assertEquals('fisk', __v_0);
+var __v_6 = 'hest';
+assertEquals('hest', __v_0);
+this.bar = 'fisk';
+assertEquals('fisk', __v_1);
+__v_1;
+assertEquals('fisk', __v_1);
+__v_1 = 'hest';
+assertEquals('hest', __v_1);
+
+function __f_0(o) {
+ o.g();
+ if (!o.g()) {
+ assertTrue(false);
+ }
+}
+__v_4 = {};
+__v_4.size = function() { return 42; }
+__v_4.g = function() { return this.size(); };
+__f_0({g: __v_4.g, size:__v_4.size});
+for (var __v_0 = 0; __v_0 < 5; __v_0++) __f_0(__v_4);
+%OptimizeFunctionOnNextCall(__f_0);
+__f_0(__v_4);
+__f_0({g: __v_4.g, size:__v_4.size});
diff --git a/deps/v8/test/mjsunit/regress/regress-348280.js b/deps/v8/test/mjsunit/regress/regress-348280.js
new file mode 100644
index 000000000..319c270be
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-348280.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
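+// bar is always called with p == 1, so the baz(1) call never executes;
+// optimizing bar exercises inlining of a call site without type feedback.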
+function baz(f) { f(); }
+function goo() {}
+baz(goo);
+baz(goo);
+
+function bar(p) { if (p == 0) baz(1); }
+bar(1);
+bar(1);
+%OptimizeFunctionOnNextCall(bar);
+bar(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-349870.js b/deps/v8/test/mjsunit/regress/regress-349870.js
new file mode 100644
index 000000000..72df05524
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-349870.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
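+// Only checks that RegExp.prototype.compile on a frozen RegExp does not crash.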
+var r = /x/;
+Object.freeze(r);
+r.compile("x");
diff --git a/deps/v8/test/mjsunit/regress/regress-349885.js b/deps/v8/test/mjsunit/regress/regress-349885.js
new file mode 100644
index 000000000..dd3e79526
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-349885.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Regression test for bug 349885.
+
+function foo(a) {
+ a[292755462] = new Object();
+}
+foo(new Array(5));
+foo(new Array(5));
+%OptimizeFunctionOnNextCall(foo);
+foo(new Array(10));
diff --git a/deps/v8/test/mjsunit/regress/regress-350865.js b/deps/v8/test/mjsunit/regress/regress-350865.js
new file mode 100644
index 000000000..74234db88
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-350865.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stress-compaction --stack-size=150
+
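+// /\2/ backreferences a capture group that does not exist; compiling and
+// running it with the stack nearly exhausted (--stack-size=150) must not
+// crash.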
+/\2/.test("1");
+
+function rec() {
+ try {
+ rec();
+ } catch(e) {
+ /\2/.test("1");
+ }
+}
+
+rec();
diff --git a/deps/v8/test/mjsunit/regress/regress-350887.js b/deps/v8/test/mjsunit/regress/regress-350887.js
new file mode 100644
index 000000000..638aa3078
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-350887.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var arr = [];
+assertSame(0, arr.length);
+assertSame(undefined, arr[0]);
+Object.defineProperty(arr, '2501866687', { value: 4, configurable: false });
+// 2501866688 is out of smi range.
+assertSame(2501866688, arr.length);
+assertSame(undefined, arr[0]);
+arr.length = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-351261.js b/deps/v8/test/mjsunit/regress/regress-351261.js
new file mode 100644
index 000000000..48af5442f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-351261.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --fold-constants
+
+function store(a) {
+ a[5000000] = 1;
+}
+
+function foo() {
+ var __v_8 = new Object;
+ var __v_7 = new Array(4999990);
+ store(__v_8);
+ store(__v_7);
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-351263.js b/deps/v8/test/mjsunit/regress/regress-351263.js
new file mode 100644
index 000000000..28edbcdb6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-351263.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
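+// This looks like a rotate idiom, but __v_12 is a plain object, so
+// (__v_12 - sa) is NaN; the optimizer's rotate matching must not assume a
+// numeric shift amount.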
+var __v_12 = {};
+function __f_30(x, sa) {
+ return (x >>> sa) | (x << (__v_12 - sa));
+}
+__f_30(1.4, 1);
+__f_30(1.4, 1);
+%OptimizeFunctionOnNextCall(__f_30);
+__f_30(1.4, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-351315.js b/deps/v8/test/mjsunit/regress/regress-351315.js
new file mode 100644
index 000000000..e2580fc34
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-351315.js
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f_13(x, y, z) { }
+
+v_5 = f_13.bind({}, -7);
+
+function f_0(z) {
+ return %NewObjectFromBound(v_5);
+}
+
+function f_8(z2, y2) {
+ var v_0 = { f1 : 0.5, f2 : 0.25 };
+ return f_0(v_0);
+}
+
+function f_12(f, args) {
+ f.apply(this, args);
+ %OptimizeFunctionOnNextCall(f);
+ f.apply(this, args);
+}
+
+f_12(f_8, [6, 4]);
diff --git a/deps/v8/test/mjsunit/regress/regress-351319.js b/deps/v8/test/mjsunit/regress/regress-351319.js
new file mode 100644
index 000000000..a2afbb6a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-351319.js
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function __f_0(a, base) {
+ a[base] = 1;
+ a[base] = -1749557862;
+}
+var __v_0 = new Array(1024);
+var __v_1 = new Array(128);
+__f_0(__v_0, 1);
+__f_0(__v_1, -2);
+%OptimizeFunctionOnNextCall(__f_0);
+__f_0(__v_0, -2);
diff --git a/deps/v8/test/mjsunit/regress/regress-352059.js b/deps/v8/test/mjsunit/regress/regress-352059.js
new file mode 100644
index 000000000..cd1a4c28f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-352059.js
@@ -0,0 +1,35 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var foo = false;
+
+function bar() {
+ foo = 2;
+ return 4 % foo;
+}
+
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-353551.js b/deps/v8/test/mjsunit/regress/regress-353551.js
new file mode 100644
index 000000000..c6e7856d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-353551.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var depth = 0;
+function __f_3(x) {
+ var __v_1 = arguments;
+ __v_1[1000] = 123;
+ depth++;
+ if (depth > 3000) return;
+ function __f_4() {
+ ++__v_1[0];
+ __f_3(0.5);
+ };
+ __f_4();
+}
+__f_3(0.5);
diff --git a/deps/v8/test/mjsunit/regress/regress-354357.js b/deps/v8/test/mjsunit/regress/regress-354357.js
new file mode 100644
index 000000000..84b7ebd62
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-354357.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-opt
+
+var v = {};
+function inlined() {
+ return !(v.bar++);
+}
+function outer() {
+ inlined();
+};
+
+outer();
diff --git a/deps/v8/test/mjsunit/regress/regress-354433.js b/deps/v8/test/mjsunit/regress/regress-354433.js
new file mode 100644
index 000000000..80ea28623
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-354433.js
@@ -0,0 +1,54 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var __v_0 = {};
+var __v_5 = {};
+function __f_2() {
+ this.__defineGetter__('str', function() { return __f_2(this); });
+ this.str = "1";
+ this.toString = function() {
+ return this.str;
+ };
+};
+
+__v_5 = new __f_2();
+__v_0 = new __f_2();
+
+function __f_5(fun,a,b) {
+ __v_5.str = a;
+ __v_0.str = b;
+ fun(__v_5, __v_0);
+}
+
+function __f_8(a,b) { return a%b };
+
+__f_5(__f_8, 1 << 30, 1);
+__f_5(__f_8, 1, 1 << 30);
+%OptimizeFunctionOnNextCall(__f_8);
+__f_5(__f_8, 1, 1 << 30);
diff --git a/deps/v8/test/mjsunit/regress/regress-355485.js b/deps/v8/test/mjsunit/regress/regress-355485.js
new file mode 100644
index 000000000..3c66884c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-355485.js
@@ -0,0 +1,5 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
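+// U+00B5 MICRO SIGN uppercases to U+039C GREEK CAPITAL LETTER MU.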
+assertEquals("\u039c", "\u00b5".toUpperCase());
diff --git a/deps/v8/test/mjsunit/regress/regress-355523.js b/deps/v8/test/mjsunit/regress/regress-355523.js
new file mode 100644
index 000000000..d61fe844e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-355523.js
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// This test requires ASAN.
+
+function __f_4(a, b) { }
+function __f_8(n) { return __f_4(arguments[13], arguments[-10]); }
+function __f_6(a) { return __f_8(0, a); }
+__f_8(0);
+__f_8(0);
+%OptimizeFunctionOnNextCall(__f_8);
+__f_8(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-356053.js b/deps/v8/test/mjsunit/regress/regress-356053.js
new file mode 100644
index 000000000..8f0dbdd09
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-356053.js
@@ -0,0 +1,9 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noconcurrent-recompilation --expose-gc --allow-natives-syntax
+
+%SetFlags("--concurrent-recompilation --block-concurrent-recompilation");
+gc();
+try { %UnblockConcurrentRecompilation(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-356589.js b/deps/v8/test/mjsunit/regress/regress-356589.js
new file mode 100644
index 000000000..f93c54564
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-356589.js
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test passes if it does not crash in debug mode
+
+arr = ['a', 'b', 'c', 'd'];
+Object.defineProperty(arr.__proto__, '0', { get: function(){} });
+Object.defineProperty(arr, '2', {get: function(){} });
+Object.observe(arr, function() {});
+arr.length = 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-357108.js b/deps/v8/test/mjsunit/regress/regress-357108.js
new file mode 100644
index 000000000..b20975b02
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-357108.js
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --typed-array-max-size-in-heap=64
+
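+// Stores into a Uint8Array coerce the value with ToNumber: "" and {} both
+// store 0 (NaN truncates to 0), while the object with valueOf stores 27.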
+function TestArray(constructor) {
+ function Check(a) {
+ a[0] = "";
+ assertEquals(0, a[0]);
+ a[0] = {};
+ assertEquals(0, a[0]);
+ a[0] = { valueOf : function() { return 27; } };
+ assertEquals(27, a[0]);
+ }
+ Check(new constructor(1));
+ Check(new constructor(100));
+}
+
+TestArray(Uint8Array);
diff --git a/deps/v8/test/mjsunit/regress/regress-485.js b/deps/v8/test/mjsunit/regress/regress-485.js
index f26e0eb11..f26e0eb11 100755..100644
--- a/deps/v8/test/mjsunit/regress/regress-485.js
+++ b/deps/v8/test/mjsunit/regress/regress-485.js
diff --git a/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
new file mode 100644
index 000000000..3791c35f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
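+// After the first iteration o becomes y, whose x is 0, so the expected result
+// is 0 + 3; check elimination must not assume o keeps its initial map across
+// the loop phi.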
+function f() {
+ var o = {x:1};
+ var y = {y:2.5, x:0};
+ var result;
+ for (var i = 0; i < 2; i++) {
+ result = o.x + 3;
+ o = y;
+ }
+ return result;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(3, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-cr-344285.js b/deps/v8/test/mjsunit/regress/regress-cr-344285.js
new file mode 100644
index 000000000..42e8bd109
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-cr-344285.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function __f_1(g) { return (g/-1) ^ 1; }
+var __v_0 = 1 << 31;
+var __v_2 = __f_1(__v_0);
+caught = false;
+try {
+ Realm.eval(__v_2, "Realm.global(0).y = 1");
+} catch (e) {
+ caught = true;
+}
+assertTrue(caught, "exception not caught");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-347903.js b/deps/v8/test/mjsunit/regress/regress-crbug-347903.js
new file mode 100644
index 000000000..b5174da0d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-347903.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-allocation-folding --verify-heap
+
+function f() {
+ var a = new Array(84632);
+ // Allocation folding will bail out trying to fold the elements alloc of
+ // array "b."
+ var b = new Array(84632);
+ var c = new Array(84632);
+ return [a, b, c];
+}
+f(); f();
+%OptimizeFunctionOnNextCall(f);
+for(var i = 0; i < 10; i++) {
+ f();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-349853.js b/deps/v8/test/mjsunit/regress/regress-crbug-349853.js
new file mode 100644
index 000000000..53af64c9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-349853.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = ["string"];
+function funky(array) { return array[0] = 1; }
+funky(a);
+
+function crash() {
+ var q = [0];
+ // The failing ASSERT was only triggered when compiling for OSR.
+ for (var i = 0; i < 100000; i++) {
+ funky(q);
+ }
+ q[0] = 0;
+ funky(q)
+}
+
+crash();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-350434.js b/deps/v8/test/mjsunit/regress/regress-crbug-350434.js
new file mode 100644
index 000000000..8a9a8e530
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-350434.js
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --gc-global --noincremental-marking --allow-natives-syntax
+
+function Ctor() {
+ this.foo = 1;
+}
+
+var o = new Ctor();
+var p = new Ctor();
+
+
+function crash(o, timeout) {
+ var s = "4000111222"; // Outside Smi range.
+ %SetAllocationTimeout(100000, timeout);
+ // This allocates a heap number, causing a GC, triggering lazy deopt.
+ var end = s >>> 0;
+ s = s.substring(0, end);
+ // This creates a map dependency, which gives the GC a reason to trigger
+ // a lazy deopt when that map dies.
+ o.bar = 2;
+}
+
+crash(o, 100000);
+crash(o, 100000);
+crash(p, 100000);
+%OptimizeFunctionOnNextCall(crash);
+crash(o, 100000);
+o = null;
+p = null;
+crash({}, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-350864.js b/deps/v8/test/mjsunit/regress/regress-crbug-350864.js
new file mode 100644
index 000000000..8a793cb0a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-350864.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-symbols
+
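+// Object.getOwnPropertySymbols must report only the symbol added explicitly,
+// not any internal symbols the engine may have attached when v1 was used as a
+// WeakMap key.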
+var v0 = new WeakMap;
+var v1 = {};
+v0.set(v1, 1);
+var sym = Symbol();
+v1[sym] = 1;
+var symbols = Object.getOwnPropertySymbols(v1);
+assertArrayEquals([sym], symbols);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-350867.js b/deps/v8/test/mjsunit/regress/regress-crbug-350867.js
new file mode 100644
index 000000000..d8b826cff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-350867.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f1(a, i) {
+ return a[i];
+}
+function f2(a, b, c, index) {
+ return f1(arguments, index);
+}
+
+f2(2, 3, 4, "foo");
+f2(2, 3, 4, "foo");
+assertEquals(11, f1([11, 22, 33], 0));
+assertEquals(22, f2(22, 33, 44, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-350890.js b/deps/v8/test/mjsunit/regress/regress-crbug-350890.js
new file mode 100644
index 000000000..b60a2aaf9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-350890.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function set_length(a, l) {
+ a.length = l;
+}
+
+function test1() {
+ var l = {};
+ var a = Array(l);
+ set_length(a, 3);
+ set_length(a, 3);
+ assertEquals(3, a.length);
+}
+
+function test2() {
+ var a = [];
+ set_length(a, 10);
+ set_length(a, 10);
+ Object.freeze(a);
+ set_length(a, 3);
+ set_length(a, 3);
+ assertEquals(10, a.length);
+}
+
+function test3() {
+ var a = [2];
+ Object.defineProperty(a, "length", {value:2, writable: false});
+ %ToFastProperties(a);
+ set_length([], 10);
+ set_length([], 10);
+ set_length(a, 10);
+ set_length(a, 10);
+ assertEquals(2, a.length);
+}
+
+test1();
+test2();
+test3();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-351262.js b/deps/v8/test/mjsunit/regress/regress-crbug-351262.js
new file mode 100644
index 000000000..a2f4eadc0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-351262.js
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (var x in this) {};
+JSON.stringify(this);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-351320.js b/deps/v8/test/mjsunit/regress/regress-crbug-351320.js
new file mode 100644
index 000000000..24fc64179
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-351320.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --fold-constants
+
+var result = 0;
+var o1 = {};
+o2 = {y:1.5};
+o2.y = 0;
+o3 = o2.y;
+
+function crash() {
+ for (var i = 0; i < 10; i++) {
+ result += o1.x + o3.foo;
+ }
+}
+
+crash();
+%OptimizeFunctionOnNextCall(crash);
+crash();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-351658.js b/deps/v8/test/mjsunit/regress/regress-crbug-351658.js
new file mode 100644
index 000000000..ae6b50ec8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-351658.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ var f = eval("(function(){0 = y + y})");
+ %OptimizeFunctionOnNextCall(f);
+ f();
+ assertUnreachable();
+} catch(e) {
+ assertTrue(e instanceof ReferenceError);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-352058.js b/deps/v8/test/mjsunit/regress/regress-crbug-352058.js
new file mode 100644
index 000000000..e270d8300
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-352058.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --check-elimination --stress-opt
+
+var v0 = this;
+var v2 = this;
+function f() {
+ v2 = [1.2, 2.3];
+ v0 = [12, 23];
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-352586.js b/deps/v8/test/mjsunit/regress/regress-crbug-352586.js
new file mode 100644
index 000000000..221048099
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-352586.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = {};
+
+function getter() {
+ do {
+ return a + 1;
+ } while (false);
+}
+
+a.__proto__ = Error("");
+a.__defineGetter__('message', getter);
+a.message;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-354391.js b/deps/v8/test/mjsunit/regress/regress-crbug-354391.js
new file mode 100644
index 000000000..e652bd3d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-354391.js
@@ -0,0 +1,21 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function load(a, i) {
+ return a[i];
+}
+
+function f2(a, b, c, d, index) {
+ return load(arguments, index);
+}
+
+f2(1, 2, 3, 4, "foo");
+f2(1, 2, 3, 4, "foo");
+load([11, 22, 33], 0);
+assertEquals(11, f2(11, 22, 33, 44, 0));
+
+%OptimizeFunctionOnNextCall(load);
+assertEquals(11, f2(11, 22, 33, 44, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-dictionary-to-fast-arguments.js b/deps/v8/test/mjsunit/regress/regress-dictionary-to-fast-arguments.js
new file mode 100644
index 000000000..f12679a66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-dictionary-to-fast-arguments.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
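+// Filling indices 10000 down to 1 first puts the arguments object's elements
+// into dictionary mode and then back into fast mode (hence the file name);
+// the mapped parameters 1.5 and 2.5 must survive the transitions.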
+function f(a, b) {
+ for (var i = 10000; i > 0; i--) {
+ arguments[i] = 0;
+ }
+}
+
+f(1.5, 2.5);
diff --git a/deps/v8/test/mjsunit/regress/regress-fast-empty-string.js b/deps/v8/test/mjsunit/regress/regress-fast-empty-string.js
new file mode 100644
index 000000000..9b9fea963
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-fast-empty-string.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {};
+o[""] = 1;
+var x = {__proto__:o};
+for (i = 0; i < 3; i++) {
+ o[""];
+}
+for (i = 0; i < 3; i++) {
+ assertEquals(undefined, o.x);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-force-representation.js b/deps/v8/test/mjsunit/regress/regress-force-representation.js
new file mode 100644
index 000000000..8f6746b7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-force-representation.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function optimize(crankshaft_test) {
+ crankshaft_test();
+ crankshaft_test();
+ %OptimizeFunctionOnNextCall(crankshaft_test);
+ crankshaft_test();
+}
+
+function f() {
+ var v1 = 0;
+ var v2 = -0;
+ var t = v2++;
+ v2++;
+ return Math.max(v2++, v1++);
+}
+
+optimize(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js b/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js
new file mode 100644
index 000000000..e9f2b516b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+"use strict";
+
+var global;
+
+function g() { global = this; }
+Object.defineProperty(Number.prototype, "prop", { get: g });
+function f(s) { s.prop; }
+
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+f(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-keyed-store-global.js b/deps/v8/test/mjsunit/regress/regress-keyed-store-global.js
new file mode 100644
index 000000000..1b127776d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-keyed-store-global.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+function f(a) {
+ for (var i = 0; i < 256; i++) a[i] = i;
+}
+
+f([]);
+f([]);
+f(this);
diff --git a/deps/v8/test/mjsunit/regress/regress-migrate-callbacks.js b/deps/v8/test/mjsunit/regress/regress-migrate-callbacks.js
new file mode 100644
index 000000000..b1979ea44
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-migrate-callbacks.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o1 = {};
+o1.x = 1
+o1.y = 1.5
+var o2 = {}
+o2.x = 1.5;
+o2.__defineSetter__('y', function(v) { });
+o1.y;
diff --git a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
index a9c20ec84..0aedcab01 100644
--- a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
+++ b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --block-concurrent-recompilation
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/regress/regress-sort-arguments.js b/deps/v8/test/mjsunit/regress/regress-sort-arguments.js
new file mode 100644
index 000000000..54ebeb111
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-sort-arguments.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(a) { return arguments; }
+var a = f(1,2,3);
+delete a[1];
+Array.prototype.sort.apply(a);
+a[10000000] = 4;
+Array.prototype.sort.apply(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-store-global-proxy.js b/deps/v8/test/mjsunit/regress/regress-store-global-proxy.js
new file mode 100644
index 000000000..c85531c5f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-store-global-proxy.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+delete Object.prototype.__proto__;
+
+function f() {
+ this.toString = 1;
+}
+
+f.apply({});
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-store-heapobject.js b/deps/v8/test/mjsunit/regress/regress-store-heapobject.js
new file mode 100644
index 000000000..9f2a1b8ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-store-heapobject.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {a: undefined};
+
+function store(o, v) {
+ o.a = v;
+}
+
+store(o, undefined);
+store(o, undefined);
+
+function f(bool) {
+ var o = {a: undefined};
+ if (bool) {
+ store(o, 1);
+ }
+ return o;
+}
+
+f(false);
+f(false);
+%OptimizeFunctionOnNextCall(f);
+f(true);
diff --git a/deps/v8/test/mjsunit/regress/setvalueof-deopt.js b/deps/v8/test/mjsunit/regress/setvalueof-deopt.js
new file mode 100644
index 000000000..8c42c8a20
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/setvalueof-deopt.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function g(x, y) {
+ return y;
+}
+
+function f(deopt) {
+ return g(%_SetValueOf(1, 1), deopt + 0);
+}
+
+f(0);
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+assertEquals("result0", f("result"));
diff --git a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
new file mode 100644
index 000000000..9f6d43453
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
@@ -0,0 +1,85 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+(function OneByteSeqStringSetCharDeoptOsr() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function f(string, osr) {
+ var world = " world";
+ %_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48));
+
+ if (osr) while (%GetOptimizationStatus(f) == 2) {}
+
+ return string + world;
+ }
+
+ assertEquals("Hello " + "world", f("hello", false));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hello " + "world", f("hello", true));
+})();
+
+
+(function OneByteSeqStringSetCharDeopt() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function g(x) {
+ }
+
+ function f(string) {
+ g(%_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ return string;
+ }
+
+ assertEquals("Hell" + "o", f("hello"));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hell" + "o", f("hello"));
+})();
+
+
+(function TwoByteSeqStringSetCharDeopt() {
+ function deopt() {
+ %DeoptimizeFunction(f);
+ }
+
+ function g(x) {
+ }
+
+ function f(string) {
+ g(%_TwoByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+ return string;
+ }
+
+ assertEquals("Hell" + "o", f("\u20ACello"));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("Hell" + "o", f("\u20ACello"));
+})();
diff --git a/deps/v8/test/mjsunit/shift-for-integer-div.js b/deps/v8/test/mjsunit/shift-for-integer-div.js
index aaa67e97f..884202d31 100644
--- a/deps/v8/test/mjsunit/shift-for-integer-div.js
+++ b/deps/v8/test/mjsunit/shift-for-integer-div.js
@@ -60,7 +60,7 @@ divn1(2);
divn1(2);
%OptimizeFunctionOnNextCall(divn1);
assertEquals(-2, divn1(2));
-assertEquals(two_31, divn1(-two_31));
+assertEquals(-two_31, divn1(two_31));
//Check for truncating to int32 case
@@ -85,3 +85,14 @@ divn4t(8);
assertEquals(1, divn4t(-5));
assertEquals(-1, divn4t(5));
assertOptimized(divn4t);
+
+// Check kMinInt case.
+function div_by_two(x) {
+ return (x / 2) | 0;
+}
+
+div_by_two(12);
+div_by_two(34);
+%OptimizeFunctionOnNextCall(div_by_two);
+div_by_two(56);
+assertEquals(-(1 << 30), div_by_two(1 << 31));
diff --git a/deps/v8/test/mjsunit/simple-constructor.js b/deps/v8/test/mjsunit/simple-constructor.js
index 391ef3d6d..391ef3d6d 100755..100644
--- a/deps/v8/test/mjsunit/simple-constructor.js
+++ b/deps/v8/test/mjsunit/simple-constructor.js
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
new file mode 100644
index 000000000..ca627fc27
--- /dev/null
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+function check(func, input, expected) {
+ func(-1);
+ func(-1);
+ %OptimizeFunctionOnNextCall(func);
+ assertEquals(expected, func(input));
+ assertOptimized(func);
+}
+
+function mul_by_neg_1(a) { return a * -1; }
+function mul_by_0(a) { return a * 0; }
+function mul_by_1(a) { return a * 1; }
+function mul_by_2(a) { return a * 2; }
+
+check(mul_by_neg_1, 2, -2);
+check(mul_by_0, 2, 0);
+check(mul_by_1, 2, 2);
+check(mul_by_2, 2, 4);
+
+function limit_range(a) {
+ // Limit the range of 'a' to enable no-overflow optimizations.
+ return Math.max(Math.min(a | 0, 10), -10);
+}
+
+function mul_by_neg_127(a) { return limit_range(a) * -127; }
+function mul_by_neg_128(a) { return limit_range(a) * -128; }
+function mul_by_neg_129(a) { return limit_range(a) * -129; }
+function mul_by_1023(a) { return limit_range(a) * 1023; }
+function mul_by_1024(a) { return limit_range(a) * 1024; }
+function mul_by_1025(a) { return limit_range(a) * 1025; }
+
+check(mul_by_neg_127, 2, -254);
+check(mul_by_neg_128, 2, -256);
+check(mul_by_neg_129, 2, -258);
+check(mul_by_1023, 2, 2046);
+check(mul_by_1024, 2, 2048);
+check(mul_by_1025, 2, 2050);
+
+// Deopt on minus zero.
+assertEquals(-0, mul_by_neg_128(0));
+assertUnoptimized(mul_by_neg_128);
+assertEquals(-0, mul_by_2(-0));
+assertUnoptimized(mul_by_2);
+
+// Deopt on overflow.
+
+// 2^30 is a smi boundary on arm and ia32.
+var two_30 = 1 << 30;
+// 2^31 is a smi boundary on arm64 and x64.
+var two_31 = 2 * two_30;
+
+// TODO(rmcilroy): replace after r16361 with: if (%IsValidSmi(two_31)) {
+if (true) {
+ assertEquals(two_31, mul_by_neg_1(-two_31));
+ assertUnoptimized(mul_by_neg_1);
+} else {
+ assertEquals(two_30, mul_by_neg_1(-two_30));
+ assertUnoptimized(mul_by_neg_1);
+}
diff --git a/deps/v8/test/mjsunit/string-case.js b/deps/v8/test/mjsunit/string-case.js
index 283e703fc..34c2340d3 100644
--- a/deps/v8/test/mjsunit/string-case.js
+++ b/deps/v8/test/mjsunit/string-case.js
@@ -25,7 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --random-seed=17
+// Flags: --random-seed=17 --allow-natives-syntax
+// Flags: --expose-externalize-string
assertEquals("ΚΟΣΜΟΣ ΚΟΣΜΟΣ".toLowerCase(), "κοσμος κοσμος");
@@ -58,6 +59,19 @@ function test(length) {
strLower += String.fromCharCode(charCodeToLower(c));
strUpper += String.fromCharCode(charCodeToUpper(c));
}
+ %FlattenString(strLower);
+ %FlattenString(strUpper);
+ // Sequential string.
+ assertEquals(strLower, str.toLowerCase());
+ assertEquals(strUpper, str.toUpperCase());
+ // Cons string.
+ assertEquals(strLower + strLower, (str + str).toLowerCase());
+ assertEquals(strUpper + strUpper, (str + str).toUpperCase());
+ // Sliced string.
+ assertEquals(strLower.substring(1), str.substring(1).toLowerCase());
+ assertEquals(strUpper.substring(1), str.substring(1).toUpperCase());
+ // External string.
+ externalizeString(str, false);
assertEquals(strLower, str.toLowerCase());
assertEquals(strUpper, str.toUpperCase());
}
diff --git a/deps/v8/test/mjsunit/string-match.js b/deps/v8/test/mjsunit/string-match.js
index 202396d30..202396d30 100755..100644
--- a/deps/v8/test/mjsunit/string-match.js
+++ b/deps/v8/test/mjsunit/string-match.js
diff --git a/deps/v8/test/mjsunit/string-oom-array-join.js b/deps/v8/test/mjsunit/string-oom-array-join.js
new file mode 100644
index 000000000..73758ce96
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-oom-array-join.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = "a";
+for (var i = 0; i < 23; i++) a += a;
+var b = [];
+for (var i = 0; i < (1<<5); i++) b.push(a);
+
+function join() {
+ b.join();
+}
+
+assertThrows(join, RangeError);
diff --git a/deps/v8/test/mjsunit/string-oom-concat.js b/deps/v8/test/mjsunit/string-oom-concat.js
new file mode 100644
index 000000000..9529c8938
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-oom-concat.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function concat() {
+ var a = " ";
+ for (var i = 0; i < 100; i++) {
+ a += a;
+ }
+}
+
+assertThrows(concat, RangeError);
diff --git a/deps/v8/test/mjsunit/string-oom-replace-global-regexp-with-string.js b/deps/v8/test/mjsunit/string-oom-replace-global-regexp-with-string.js
new file mode 100644
index 000000000..2de01109e
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-oom-replace-global-regexp-with-string.js
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+var a = 'a';
+for (var i = 0; i < 5; i++) a += a;
+var b = 'b';
+for (var i = 0; i < 23; i++) b += b;
+
+function replace1() {
+ a.replace(/./g, b);
+}
+
+assertThrows(replace1, RangeError);
+
+
+var a = 'a';
+for (var i = 0; i < 16; i++) a += a;
+
+function replace2() {
+ a.replace(/a/g, a);
+}
+
+assertThrows(replace2, RangeError);
diff --git a/deps/v8/test/mjsunit/string-oom-replace-regexp-global-with-function.js b/deps/v8/test/mjsunit/string-oom-replace-regexp-global-with-function.js
new file mode 100644
index 000000000..5555a5f1e
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-oom-replace-regexp-global-with-function.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = "a";
+for (var i = 0; i < 5; i++) a += a;
+var b = "b";
+for (var i = 0; i < 23; i++) b += b;
+
+function replace() {
+ a.replace(/a/g, function() { return b });
+}
+
+assertThrows(replace, RangeError);
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 2fec04b0b..2fec04b0b 100755..100644
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
diff --git a/deps/v8/test/mjsunit/substr.js b/deps/v8/test/mjsunit/substr.js
index cab8b1bf6..cab8b1bf6 100755..100644
--- a/deps/v8/test/mjsunit/substr.js
+++ b/deps/v8/test/mjsunit/substr.js
diff --git a/deps/v8/test/mjsunit/test-hidden-string.js b/deps/v8/test/mjsunit/test-hidden-string.js
new file mode 100644
index 000000000..a5d32c839
--- /dev/null
+++ b/deps/v8/test/mjsunit/test-hidden-string.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {};
+%SetHiddenProperty(o, "test", 1);
+// Create non-internalized ""
+var empty = "a".substring(1, 1);
+assertEquals(undefined, o[empty]);
diff --git a/deps/v8/test/mjsunit/third_party/array-isarray.js b/deps/v8/test/mjsunit/third_party/array-isarray.js
deleted file mode 100644
index 0fc42a3f2..000000000
--- a/deps/v8/test/mjsunit/third_party/array-isarray.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Based on LayoutTests/fast/js/resources/Array-isArray.js
-
-assertTrue(Array.isArray([]));
-assertTrue(Array.isArray(new Array));
-assertTrue(Array.isArray(Array()));
-assertTrue(Array.isArray('abc'.match(/(a)*/g)));
-assertFalse((function(){ return Array.isArray(arguments); })());
-assertFalse(Array.isArray());
-assertFalse(Array.isArray(null));
-assertFalse(Array.isArray(undefined));
-assertFalse(Array.isArray(true));
-assertFalse(Array.isArray(false));
-assertFalse(Array.isArray('a string'));
-assertFalse(Array.isArray({}));
-assertFalse(Array.isArray({length: 5}));
-assertFalse(Array.isArray({__proto__: Array.prototype, length:1, 0:1, 1:2}));
-
diff --git a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js b/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
deleted file mode 100644
index 974ac55e6..000000000
--- a/deps/v8/test/mjsunit/third_party/array-splice-webkit.js
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Simple splice tests based on webkit layout tests.
-var arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['c','d'], arr.splice(2));
-assertArrayEquals(['a','b'], arr);
-assertArrayEquals(['a','b'], arr.splice(0));
-assertArrayEquals([], arr)
-
-arr = ['a','b','c','d'];
-assertEquals([], arr.splice())
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals(['a','b','c','d'], arr.splice(null))
-assertArrayEquals([], arr);
-
-arr = ['a','b','c','d'];
-assertArrayEquals([], arr.splice(100))
-assertArrayEquals(['a','b','c','d'], arr);
-assertArrayEquals(['d'], arr.splice(-1))
-assertArrayEquals(['a','b','c'], arr);
-
-assertArrayEquals([], arr.splice(2, undefined))
-assertArrayEquals([], arr.splice(2, null))
-assertArrayEquals([], arr.splice(2, -1))
-assertArrayEquals([], arr.splice(2, 0))
-assertArrayEquals(['a','b','c'], arr);
-assertArrayEquals(['c'], arr.splice(2, 100))
-assertArrayEquals(['a','b'], arr);
diff --git a/deps/v8/test/mjsunit/third_party/string-trim.js b/deps/v8/test/mjsunit/third_party/string-trim.js
deleted file mode 100644
index 234dff6dc..000000000
--- a/deps/v8/test/mjsunit/third_party/string-trim.js
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder(s) nor the names of any
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Based on LayoutTests/fast/js/script-tests/string-trim.js
-
-// References to trim(), trimLeft() and trimRight() functions for
-// testing Function's *.call() and *.apply() methods.
-
-var trim = String.prototype.trim;
-var trimLeft = String.prototype.trimLeft;
-var trimRight = String.prototype.trimRight;
-
-var testString = 'foo bar';
-var trimString = '';
-var leftTrimString = '';
-var rightTrimString = '';
-var wsString = '';
-
-var whitespace = [
- {s : '\u0009', t : 'HORIZONTAL TAB'},
- {s : '\u000A', t : 'LINE FEED OR NEW LINE'},
- {s : '\u000B', t : 'VERTICAL TAB'},
- {s : '\u000C', t : 'FORMFEED'},
- {s : '\u000D', t : 'CARRIAGE RETURN'},
- {s : '\u0020', t : 'SPACE'},
- {s : '\u00A0', t : 'NO-BREAK SPACE'},
- {s : '\u2000', t : 'EN QUAD'},
- {s : '\u2001', t : 'EM QUAD'},
- {s : '\u2002', t : 'EN SPACE'},
- {s : '\u2003', t : 'EM SPACE'},
- {s : '\u2004', t : 'THREE-PER-EM SPACE'},
- {s : '\u2005', t : 'FOUR-PER-EM SPACE'},
- {s : '\u2006', t : 'SIX-PER-EM SPACE'},
- {s : '\u2007', t : 'FIGURE SPACE'},
- {s : '\u2008', t : 'PUNCTUATION SPACE'},
- {s : '\u2009', t : 'THIN SPACE'},
- {s : '\u200A', t : 'HAIR SPACE'},
- {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},
- {s : '\u2028', t : 'LINE SEPARATOR'},
- {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},
- {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}
-];
-
-for (var i = 0; i < whitespace.length; i++) {
- assertEquals(whitespace[i].s.trim(), '');
- assertEquals(whitespace[i].s.trimLeft(), '');
- assertEquals(whitespace[i].s.trimRight(), '');
- wsString += whitespace[i].s;
-}
-
-trimString = wsString + testString + wsString;
-leftTrimString = testString + wsString; // Trimmed from the left.
-rightTrimString = wsString + testString; // Trimmed from the right.
-
-assertEquals(wsString.trim(), '');
-assertEquals(wsString.trimLeft(), '');
-assertEquals(wsString.trimRight(), '');
-
-assertEquals(trimString.trim(), testString);
-assertEquals(trimString.trimLeft(), leftTrimString);
-assertEquals(trimString.trimRight(), rightTrimString);
-
-assertEquals(leftTrimString.trim(), testString);
-assertEquals(leftTrimString.trimLeft(), leftTrimString);
-assertEquals(leftTrimString.trimRight(), testString);
-
-assertEquals(rightTrimString.trim(), testString);
-assertEquals(rightTrimString.trimLeft(), testString);
-assertEquals(rightTrimString.trimRight(), rightTrimString);
-
-var testValues = [0, Infinity, NaN, true, false, ({}), ['an','array'],
- ({toString:function(){return 'wibble'}})
-];
-
-for (var i = 0; i < testValues.length; i++) {
- assertEquals(trim.call(testValues[i]), String(testValues[i]));
- assertEquals(trimLeft.call(testValues[i]), String(testValues[i]));
- assertEquals(trimRight.call(testValues[i]), String(testValues[i]));
-}
diff --git a/deps/v8/test/mjsunit/value-wrapper-accessor.js b/deps/v8/test/mjsunit/value-wrapper-accessor.js
index 2a51fee99..f95145652 100644
--- a/deps/v8/test/mjsunit/value-wrapper-accessor.js
+++ b/deps/v8/test/mjsunit/value-wrapper-accessor.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// When calling user-defined accessors on strings, booleans or
-// numbers, we should create a wrapper object in classic-mode.
+// numbers, we should create a wrapper object in sloppy mode.
// Flags: --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/whitespaces.js b/deps/v8/test/mjsunit/whitespaces.js
new file mode 100644
index 000000000..78e4ad5d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/whitespaces.js
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var whitespaces = [
+ // WhiteSpace defined in ECMA-262 5.1, 7.2
+ 0x0009, // Tab TAB
+ 0x000B, // Vertical Tab VT
+ 0x000C, // Form Feed FF
+ 0x0020, // Space SP
+ 0x00A0, // No-break space NBSP
+ 0xFEFF, // Byte Order Mark BOM
+
+ // LineTerminator defined in ECMA-262 5.1, 7.3
+ 0x000A, // Line Feed LF
+ 0x000D, // Carriage Return CR
+ 0x2028, // Line Separator LS
+ 0x2029, // Paragraph Separator PS
+
+ // Unicode 6.3.0 whitespaces (category 'Zs')
+ 0x1680, // Ogham Space Mark
+ 0x180E, // Mongolian Vowel Separator
+ 0x2000, // EN QUAD
+ 0x2001, // EM QUAD
+ 0x2002, // EN SPACE
+ 0x2003, // EM SPACE
+ 0x2004, // THREE-PER-EM SPACE
+ 0x2005, // FOUR-PER-EM SPACE
+ 0x2006, // SIX-PER-EM SPACE
+ 0x2007, // FIGURE SPACE
+ 0x2008, // PUNCTUATION SPACE
+ 0x2009, // THIN SPACE
+ 0x200A, // HAIR SPACE
+ 0x2028, // LINE SEPARATOR
+ 0x2029, // PARAGRAPH SEPARATOR
+ 0x202F, // NARROW NO-BREAK SPACE
+ 0x205F, // MEDIUM MATHEMATICAL SPACE
+ 0x3000, // IDEOGRAPHIC SPACE
+];
+
+// Add a single twobyte char to force twobyte representation.
+// Interestingly, snowman is not "white" space :)
+var twobyte = "\u2603";
+var onebyte = "\u007E";
+var twobytespace = "\u2000";
+var onebytespace = "\u0020";
+
+function is_whitespace(c) {
+ return whitespaces.indexOf(c.charCodeAt(0)) > -1;
+}
+
+function test_regexp(str) {
+ var pos_match = str.match(/\s/);
+ var neg_match = str.match(/\S/);
+ var test_char = str[0];
+ var postfix = str[1];
+ if (is_whitespace(test_char)) {
+ assertEquals(test_char, pos_match[0]);
+ assertEquals(postfix, neg_match[0]);
+ } else {
+ assertEquals(test_char, neg_match[0]);
+ assertNull(pos_match);
+ }
+}
+
+function test_trim(c, infix) {
+ var str = c + c + c + infix + c;
+ if (is_whitespace(c)) {
+ assertEquals(infix, str.trim());
+ } else {
+ assertEquals(str, str.trim());
+ }
+}
+
+function test_parseInt(c, postfix) {
+ // Skip if prefix is a digit.
+ if (c >= "0" && c <= "9") return;
+ var str = c + c + "123" + postfix;
+ if (is_whitespace(c)) {
+ assertEquals(123, parseInt(str));
+ } else {
+ assertEquals(NaN, parseInt(str));
+ }
+}
+
+function test_eval(c, content) {
+ if (!is_whitespace(c)) return;
+ var str = c + c + "'" + content + "'" + c + c;
+ assertEquals(content, eval(str));
+}
+
+function test_stringtonumber(c, postfix) {
+ // Skip if prefix is a digit.
+ if (c >= "0" && c <= "9") return;
+ var result = 1 + Number(c + "123" + c + postfix);
+ if (is_whitespace(c)) {
+ assertEquals(124, result);
+ } else {
+ assertEquals(NaN, result);
+ }
+}
+
+for (var i = 0; i < 0x10000; i++) {
+ c = String.fromCharCode(i);
+ test_regexp(c + onebyte);
+ test_regexp(c + twobyte);
+ test_trim(c, onebyte + "trim");
+ test_trim(c, twobyte + "trim");
+ test_parseInt(c, onebyte);
+ test_parseInt(c, twobyte);
+ test_eval(c, onebyte);
+ test_eval(c, twobyte);
+ test_stringtonumber(c, onebytespace);
+ test_stringtonumber(c, twobytespace);
+}
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index dfe61c268..04fc6a98a 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -77,6 +77,12 @@
# TODO(2018): Temporarily allow timeout in debug mode.
'js1_5/GC/regress-203278-2': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS, FAIL]],
+ # These tests use invalid LHS expressions in assignments.
+ 'js1_5/Regress/regress-319391': [SKIP],
+ 'js1_5/Regress/regress-350253': [SKIP],
+ 'js1_5/Regress/regress-462292': [SKIP],
+ 'js1_5/decompilation/regress-443071-01': [SKIP],
+
##################### SLOW TESTS #####################
# This takes a long time to run (~100 seconds). It should only be run
@@ -129,9 +135,10 @@
'ecma/Date/15.9.2.2-5': [PASS, FAIL],
'ecma/Date/15.9.2.2-6': [PASS, FAIL],
- # 1026139: These date tests fail on arm and mips
- 'ecma/Date/15.9.5.29-1': [PASS, ['arch == arm or arch == mipsel', FAIL]],
- 'ecma/Date/15.9.5.28-1': [PASS, ['arch == arm or arch == mipsel', FAIL]],
+ # 1026139: These date tests fail on arm and mips.
+ # These date tests also fail in a time zone without daylight saving time.
+ 'ecma/Date/15.9.5.29-1': [PASS, FAIL],
+ 'ecma/Date/15.9.5.28-1': [PASS, FAIL],
# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel', FAIL]],
@@ -822,7 +829,7 @@
}], # ALWAYS
-['arch == arm', {
+['arch == arm or arch == arm64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@@ -839,7 +846,13 @@
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}], # 'arch == arm'
+}], # 'arch == arm or arch == arm64'
+
+
+['arch == arm64', {
+ # BUG(v8:3152): Runs out of stack in debug mode.
+ 'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
+}], # 'arch == arm64'
['arch == mipsel', {
@@ -860,4 +873,25 @@
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel'
+
+['arch == arm64 and simulator_run == True', {
+
+ 'js1_5/GC/regress-203278-2': [SKIP],
+
+ # These tests time out in debug mode but pass in product mode
+ 'js1_5/Regress/regress-360969-03': [SKIP],
+ 'js1_5/Regress/regress-360969-04': [SKIP],
+ 'js1_5/Regress/regress-360969-05': [SKIP],
+ 'js1_5/Regress/regress-360969-06': [SKIP],
+ 'js1_5/extensions/regress-365527': [SKIP],
+ 'ecma/Date/15.9.5.10-2': [SKIP],
+ 'js1_5/Regress/regress-416628': [SKIP],
+ 'js1_5/extensions/regress-371636': [SKIP],
+ 'ecma_3/RegExp/regress-330684': [SKIP],
+ 'ecma_3/RegExp/regress-307456': [SKIP],
+ 'js1_5/Regress/regress-303213': [SKIP],
+ 'js1_5/extensions/regress-330569': [SKIP],
+ 'js1_5/extensions/regress-351448': [SKIP],
+ 'js1_5/extensions/regress-336410-1': [SKIP],
+}], # 'arch == arm64 and simulator_run == True'
]
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 775a239f0..70a7ac663 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -132,8 +132,11 @@ class MozillaTestSuite(testsuite.TestSuite):
# If we have a local archive file with the test data, extract it.
directory_name = "data"
+ directory_name_old = "data.old"
if os.path.exists(directory_name):
- os.rename(directory_name, "data.old")
+ if os.path.exists(directory_name_old):
+ shutil.rmtree(directory_name_old)
+ os.rename(directory_name, directory_name_old)
archive_file = "downloaded_%s.tar.gz" % MOZILLA_VERSION
if os.path.exists(archive_file):
with tarfile.open(archive_file, "r:gz") as tar:
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index 9d69988f7..babf35d5d 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -25,6 +25,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# We don't parse RegExps at scanning time, so we can't fail on octal
+# escapes (we need to parse to distinguish octal escapes from valid
+# back-references).
[
[ALWAYS, {
# TODO(mstarzinger): This script parses but throws a TypeError when run.
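The scanner-versus-parser distinction described in the comment above is easiest to see with a concrete pattern: whether \1 denotes an octal escape or a back-reference depends on how many capturing groups the complete pattern contains, which is only known after a full parse. A minimal d8 sketch, purely illustrative and not part of this commit:

// Illustration only: the meaning of \1 is fixed only once the whole pattern
// has been parsed, so the scanner cannot reject octal escapes up front.
print(/(a)\1/.test("aa"));    // true: one capture group, so \1 is a back-reference
print(/a\1/.test("a\x01"));   // true: no capture groups, so \1 is the octal escape \u0001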
diff --git a/deps/v8/test/promises-aplus/README b/deps/v8/test/promises-aplus/README
new file mode 100644
index 000000000..de15da362
--- /dev/null
+++ b/deps/v8/test/promises-aplus/README
@@ -0,0 +1,29 @@
+This directory contains code for running the Promise/A+ Compliance Test
+Suite [1]. You can download it from [1], or by specifying --download to
+tools/run-tests.py.
+The Promise/A+ Compliance Test Suite expects a Node environment and some
+libraries. To run it in the d8 shell, we provide emulation functions in the
+lib/ directory.
+
+ - lib/adapter.js
+   - An adapter for the harmony Promise implementation, used by the Promise/A+ tests.
+ - lib/assert.js
+   - Emulates the assert module in Node.
+ - lib/global.js
+   - Provides global functions and variables.
+ - lib/mocha.js
+   - Emulates the Mocha [2] test framework.
+ - lib/require.js
+   - Emulates the require function in Node.
+ - lib/run-tests.js
+   - Runs all registered tests.
+
+The emulation is not complete. Upgrading the Promise/A+ tests will require
+changing the lib/ scripts.
+
+Sinon.JS [3], required by the Promise/A+ tests, is also downloaded by
+run-tests.py.
+
+[1]: https://github.com/promises-aplus/promises-tests
+[2]: http://visionmedia.github.io/mocha/
+[3]: http://sinonjs.org/
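As a rough illustration of how the pieces listed above fit together, here is a hypothetical Promise/A+-style test written against the adapter interface. It is not part of this commit and assumes the lib/ emulation scripts (global.js, require.js, mocha.js, adapter.js) have already been loaded by the test runner:

// Hypothetical test body; the names (describe, specify, assert, global.adapter)
// come from the emulation scripts in this directory.
describe('adapter smoke test', function() {
  specify('deferred() settles through then()', function(done) {
    var d = global.adapter.deferred();   // {promise, resolve, reject} triple
    d.promise.then(function(value) {
      assert.strictEqual(value, 42);     // lib/assert.js emulation
      done();                            // asynchronous completion, see lib/mocha.js
    });
    d.resolve(42);
  });
});
// lib/run-tests.js then calls RunAllTests() to execute everything registered above.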
diff --git a/deps/v8/test/promises-aplus/lib/adapter.js b/deps/v8/test/promises-aplus/lib/adapter.js
new file mode 100644
index 000000000..d99913a13
--- /dev/null
+++ b/deps/v8/test/promises-aplus/lib/adapter.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var global = this.global || {};
+
+global.adapter = {
+ resolved: function(value) { return Promise.resolve(value); },
+ rejected: function(reason) { return Promise.reject(reason); },
+ deferred: function() {
+ var resolve, reject;
+ var promise = new Promise(function(res, rej) {
+ resolve = res;
+ reject = rej;
+ });
+ return {promise: promise, resolve: resolve, reject: reject};
+ }
+};
diff --git a/deps/v8/test/promises-aplus/lib/assert.js b/deps/v8/test/promises-aplus/lib/assert.js
new file mode 100644
index 000000000..0138f3604
--- /dev/null
+++ b/deps/v8/test/promises-aplus/lib/assert.js
@@ -0,0 +1,97 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Mimics the assert module in Node.
+
+function compose(message1, message2) {
+ return message2 ? message1 + ': ' + message2 : message1
+}
+
+function fail(actual, expected, message, operator) {
+ var e = Error(compose('FAIL', message) +
+ ': (' + actual + ' ' + operator + ' ' + expected + ') should hold');
+ fails.push(e);
+ throw e;
+}
+
+function ok(value, message) {
+ if (!value) {
+ throw Error(compose('FAIL', message) + ': value = ' + value);
+ }
+}
+
+function equal(actual, expected, message) {
+ if (!(expected == actual)) {
+ fail(actual, expected, message, '==');
+ }
+}
+
+function notEqual(actual, expected, message) {
+ if (!(expected != actual)) {
+ fail(actual, expected, message, '!=');
+ }
+}
+
+function strictEqual(actual, expected, message) {
+ if (!(expected === actual)) {
+ fail(actual, expected, message, '===');
+ }
+}
+
+function notStrictEqual(actual, expected, message) {
+ if (!(expected !== actual)) {
+ fail(actual, expected, message, '!==');
+ }
+}
+
+function assert(value, message) {
+ return ok(value, message);
+}
+
+function notImplemented() {
+ throw Error('FAIL: This assertion function is not yet implemented.');
+}
+
+function clear() {
+ this.fails = [];
+}
+
+assert.fail = fail;
+assert.ok = ok;
+assert.equal = equal;
+assert.notEqual = notEqual;
+assert.deepEqual = notImplemented;
+assert.notDeepEqual = notImplemented;
+assert.strictEqual = strictEqual;
+assert.notStrictEqual = notStrictEqual;
+assert.throws = notImplemented;
+assert.doesNotThrow = notImplemented;
+assert.ifError = notImplemented;
+
+assert.clear = clear;
+
+exports = assert;
diff --git a/deps/v8/test/promises-aplus/lib/global.js b/deps/v8/test/promises-aplus/lib/global.js
new file mode 100644
index 000000000..1466d2063
--- /dev/null
+++ b/deps/v8/test/promises-aplus/lib/global.js
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var global = this.global || {};
+var setTimeout;
+var clearTimeout;
+
+(function() {
+var timers = {};
+var currentId = 0;
+
+function PostMicrotask(fn) {
+ var o = {};
+ Object.observe(o, function() {
+ fn();
+ });
+ // Change something to enqueue a microtask.
+ o.x = 'hello';
+}
+
+setInterval = function(fn, delay) {
+ var i = 0;
+ var id = currentId++;
+ function loop() {
+ if (!timers[id]) {
+ return;
+ }
+ if (i++ >= delay) {
+ fn();
+ }
+ PostMicrotask(loop);
+ }
+ PostMicrotask(loop);
+ timers[id] = true;
+ return id;
+}
+
+clearTimeout = function(id) {
+ delete timers[id];
+}
+
+clearInterval = clearTimeout;
+
+setTimeout = function(fn, delay) {
+ var id = setInterval(function() {
+ fn();
+ clearInterval(id);
+ }, delay);
+ return id;
+}
+
+}());
diff --git a/deps/v8/test/promises-aplus/lib/mocha.js b/deps/v8/test/promises-aplus/lib/mocha.js
new file mode 100644
index 000000000..24d294ef8
--- /dev/null
+++ b/deps/v8/test/promises-aplus/lib/mocha.js
@@ -0,0 +1,264 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file emulates the Mocha test framework used by the promises-aplus tests.
+
+var describe;
+var it;
+var specify;
+var before;
+var after;
+var beforeEach;
+var afterEach;
+var RunAllTests;
+
+var assert = require('assert');
+
+(function() {
+var TIMEOUT = 1000;
+
+function PostMicrotask(fn) {
+ var o = {};
+ Object.observe(o, function() {
+ fn();
+ });
+ // Change something to enqueue a microtask.
+ o.x = 'hello';
+}
+
+var context = {
+ beingDescribed: undefined,
+ currentSuiteIndex: 0,
+ suites: []
+};
+
+function Run() {
+ function current() {
+ while (context.currentSuiteIndex < context.suites.length &&
+ context.suites[context.currentSuiteIndex].hasRun) {
+ ++context.currentSuiteIndex;
+ }
+ if (context.suites.length == context.currentSuiteIndex) {
+ return undefined;
+ }
+ return context.suites[context.currentSuiteIndex];
+ }
+ var suite = current();
+ if (!suite) {
+ // done
+ print('All tests have run.');
+ return;
+ }
+ suite.Run();
+}
+
+RunAllTests = function() {
+ context.currentSuiteIndex = 0;
+ var numRegularTestCases = 0;
+ for (var i = 0; i < context.suites.length; ++i) {
+ numRegularTestCases += context.suites[i].numRegularTestCases();
+ }
+ print(context.suites.length + ' suites and ' + numRegularTestCases +
+ ' test cases are found');
+ Run();
+};
+
+function TestCase(name, before, fn, after, isRegular) {
+ this.name = name;
+ this.before = before;
+ this.fn = fn;
+ this.after = after;
+ this.isRegular = isRegular;
+ this.hasDone = false;
+}
+
+TestCase.prototype.RunFunction = function(suite, fn, postAction) {
+ if (!fn) {
+ postAction();
+ return;
+ }
+ try {
+ if (fn.length === 0) {
+ // synchronous
+ fn();
+ postAction();
+ } else {
+ // asynchronous
+ fn(postAction);
+ }
+ } catch (e) {
+ suite.ReportError(this, e);
+ }
+}
+
+TestCase.prototype.MarkAsDone = function() {
+ this.hasDone = true;
+ clearTimeout(this.timer);
+}
+
+TestCase.prototype.Run = function(suite, postAction) {
+ print('Running ' + suite.description + '#' + this.name + ' ...');
+ assert.clear();
+
+ this.timer = setTimeout(function() {
+ suite.ReportError(this, Error('timeout'));
+ }.bind(this), TIMEOUT);
+
+ this.RunFunction(suite, this.before, function(e) {
+ if (this.hasDone) {
+ return;
+ }
+ if (e instanceof Error) {
+ return suite.ReportError(this, e);
+ }
+ if (assert.fails.length > 0) {
+ return suite.ReportError(this, assert.fails[0]);
+ }
+ this.RunFunction(suite, this.fn, function(e) {
+ if (this.hasDone) {
+ return;
+ }
+ if (e instanceof Error) {
+ return suite.ReportError(this, e);
+ }
+ if (assert.fails.length > 0) {
+ return suite.ReportError(this, assert.fails[0]);
+ }
+ this.RunFunction(suite, this.after, function(e) {
+ if (this.hasDone) {
+ return;
+ }
+ if (e instanceof Error) {
+ return suite.ReportError(this, e);
+ }
+ if (assert.fails.length > 0) {
+ return suite.ReportError(this, assert.fails[0]);
+ }
+ this.MarkAsDone();
+ if (this.isRegular) {
+ print('PASS: ' + suite.description + '#' + this.name);
+ }
+ PostMicrotask(postAction);
+ }.bind(this));
+ }.bind(this));
+ }.bind(this));
+};
+
+function TestSuite(described) {
+ this.description = described.description;
+ this.cases = [];
+ this.currentIndex = 0;
+ this.hasRun = false;
+
+ if (described.before) {
+ this.cases.push(new TestCase(this.description + ' :before', undefined,
+ described.before, undefined, false));
+ }
+ for (var i = 0; i < described.cases.length; ++i) {
+ this.cases.push(new TestCase(described.cases[i].description,
+ described.beforeEach,
+ described.cases[i].fn,
+ described.afterEach,
+ true));
+ }
+ if (described.after) {
+ this.cases.push(new TestCase(this.description + ' :after',
+ undefined, described.after, undefined, false));
+ }
+}
+
+TestSuite.prototype.Run = function() {
+ this.hasRun = this.currentIndex === this.cases.length;
+ if (this.hasRun) {
+ PostMicrotask(Run);
+ return;
+ }
+
+ // TestCase.prototype.Run cannot throw an exception.
+ this.cases[this.currentIndex].Run(this, function() {
+ ++this.currentIndex;
+ PostMicrotask(Run);
+ }.bind(this));
+};
+
+TestSuite.prototype.numRegularTestCases = function() {
+ var n = 0;
+ for (var i = 0; i < this.cases.length; ++i) {
+ if (this.cases[i].isRegular) {
+ ++n;
+ }
+ }
+ return n;
+}
+
+TestSuite.prototype.ReportError = function(testCase, e) {
+ if (testCase.hasDone) {
+ return;
+ }
+ testCase.MarkAsDone();
+ this.hasRun = this.currentIndex === this.cases.length;
+ print('FAIL: ' + this.description + '#' + testCase.name + ': ' +
+ e.name + ' (' + e.message + ')');
+ ++this.currentIndex;
+ PostMicrotask(Run);
+};
+
+describe = function(description, fn) {
+ var parent = context.beingDescribed;
+ var incomplete = {
+ cases: [],
+ description: parent ? parent.description + ' ' + description : description,
+ parent: parent,
+ };
+ context.beingDescribed = incomplete;
+ fn();
+ context.beingDescribed = parent;
+
+ context.suites.push(new TestSuite(incomplete));
+}
+
+specify = it = function(description, fn) {
+ context.beingDescribed.cases.push({description: description, fn: fn});
+}
+
+before = function(fn) {
+ context.beingDescribed.before = fn;
+}
+
+after = function(fn) {
+ context.beingDescribed.after = fn;
+}
+
+beforeEach = function(fn) {
+ context.beingDescribed.beforeEach = fn;
+}
+
+afterEach = function(fn) {
+ context.beingDescribed.afterEach = fn;
+}
+
+}());
diff --git a/deps/v8/test/mjsunit/limit-locals.js b/deps/v8/test/promises-aplus/lib/require.js
index 1d36c80e5..725e45e64 100644
--- a/deps/v8/test/mjsunit/limit-locals.js
+++ b/deps/v8/test/promises-aplus/lib/require.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,23 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test that there is a limit of 131071 locals.
+var global = this.global || {};
-// Flags: --stack-size=1200
-
-function function_with_n_locals(n) {
- test_prefix = "prefix ";
- test_suffix = " suffix";
- var src = "test_prefix + (function () {"
- for (var i = 1; i <= n; i++) {
- src += "; var x" + i;
+// Emulates 'require' function in Node.
+// This is not a generic function: it only works for known modules.
+var require = function(name) {
+ var exports = {};
+ var path;
+ var base = 'test/promises-aplus/'
+ if (name.search('./helpers/') === 0) {
+ path = base + 'promises-tests/lib/tests/' + name + '.js';
+ } else if (name === 'assert') {
+ path = base + 'lib/assert.js';
+ } else if (name === 'sinon') {
+ path = base + 'sinon/sinon.js';
+ } else {
+ throw Error('We cannot load the library: ' + name);
}
- src += "; return " + n + ";})() + test_suffix";
- return eval(src);
-}
-
-assertEquals("prefix 0 suffix", function_with_n_locals(0));
-assertEquals("prefix 16000 suffix", function_with_n_locals(16000));
-assertEquals("prefix 131071 suffix", function_with_n_locals(131071));
-
-assertThrows("function_with_n_locals(131072)");
+ eval('(function() { ' + read(path) + '}())');
+ if (name === 'sinon') {
+ return this.sinon;
+ }
+ return exports;
+};
diff --git a/deps/v8/test/promises-aplus/lib/run-tests.js b/deps/v8/test/promises-aplus/lib/run-tests.js
new file mode 100644
index 000000000..6a0a96c63
--- /dev/null
+++ b/deps/v8/test/promises-aplus/lib/run-tests.js
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Defined in lib/mocha.js
+RunAllTests();
diff --git a/deps/v8/test/promises-aplus/promises-aplus.status b/deps/v8/test/promises-aplus/promises-aplus.status
new file mode 100644
index 000000000..c68eae96e
--- /dev/null
+++ b/deps/v8/test/promises-aplus/promises-aplus.status
@@ -0,0 +1,34 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+[
+[ALWAYS, {
+ # http://crbug.com/347455
+ '2.2.7': FAIL
+}], # ALWAYS
+]
diff --git a/deps/v8/test/promises-aplus/testcfg.py b/deps/v8/test/promises-aplus/testcfg.py
new file mode 100644
index 000000000..99495e6fe
--- /dev/null
+++ b/deps/v8/test/promises-aplus/testcfg.py
@@ -0,0 +1,148 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import hashlib
+import os
+import shutil
+import sys
+import tarfile
+import urllib
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+SINON_TAG = '1.7.3'
+SINON_NAME = 'sinon'
+SINON_FILENAME = 'sinon.js'
+SINON_URL = 'http://sinonjs.org/releases/sinon-' + SINON_TAG + '.js'
+SINON_HASH = 'b7ab4dd9a1a2cf0460784af3728ad15caf4bbea923f680c5abde5c8332f35984'
+
+TEST_TAG = '2.0.3'
+TEST_ARCHIVE_TOP = 'promises-tests-' + TEST_TAG
+TEST_NAME = 'promises-tests'
+TEST_ARCHIVE = TEST_NAME + '.tar.gz'
+TEST_URL = 'https://github.com/promises-aplus/promises-tests/archive/' + \
+ TEST_TAG + '.tar.gz'
+TEST_ARCHIVE_HASH = \
+ 'e446ca557ac5836dd439fecd19689c243a28b1d5a6644dd7fed4274d0fa67270'
+
+
+class PromiseAplusTestSuite(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ self.root = root
+ self.test_files_root = os.path.join(self.root, TEST_NAME, 'lib', 'tests')
+ self.name = name
+ self.helper_files_pre = [
+ os.path.join(root, 'lib', name) for name in
+ ['global.js', 'require.js', 'mocha.js', 'adapter.js']
+ ]
+ self.helper_files_post = [
+ os.path.join(root, 'lib', name) for name in
+ ['run-tests.js']
+ ]
+
+ def CommonTestName(self, testcase):
+ return testcase.path.split(os.path.sep)[-1]
+
+ def ListTests(self, context):
+ return [testcase.TestCase(self, fname[:-len('.js')]) for fname in
+ os.listdir(os.path.join(self.root, TEST_NAME, 'lib', 'tests'))
+ if fname.endswith('.js')]
+
+ def GetFlagsForTestCase(self, testcase, context):
+ return (testcase.flags + context.mode_flags + ['--harmony'] +
+ self.helper_files_pre +
+ [os.path.join(self.test_files_root, testcase.path + '.js')] +
+ self.helper_files_post)
+
+ def GetSourceForTest(self, testcase):
+ filename = os.path.join(self.root, TEST_NAME,
+ 'lib', 'tests', testcase.path + '.js')
+ with open(filename) as f:
+ return f.read()
+
+ def IsNegativeTest(self, testcase):
+ return '@negative' in self.GetSourceForTest(testcase)
+
+ def IsFailureOutput(self, output, testpath):
+ if output.exit_code != 0:
+ return True
+ return not 'All tests have run.' in output.stdout or \
+ 'FAIL:' in output.stdout
+
+ def DownloadTestData(self):
+ archive = os.path.join(self.root, TEST_ARCHIVE)
+ directory = os.path.join(self.root, TEST_NAME)
+ if not os.path.exists(archive):
+ print('Downloading {0} from {1} ...'.format(TEST_NAME, TEST_URL))
+ urllib.urlretrieve(TEST_URL, archive)
+ if os.path.exists(directory):
+ shutil.rmtree(directory)
+
+ if not os.path.exists(directory):
+ print('Extracting {0} ...'.format(TEST_ARCHIVE))
+ hash = hashlib.sha256()
+ with open(archive, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), ''):
+ hash.update(chunk)
+ if hash.hexdigest() != TEST_ARCHIVE_HASH:
+ os.remove(archive)
+ raise Exception('Hash mismatch of test data file')
+ archive = tarfile.open(archive, 'r:gz')
+ if sys.platform in ('win32', 'cygwin'):
+ # Magic incantation to allow longer path names on Windows.
+ archive.extractall(u'\\\\?\\%s' % self.root)
+ else:
+ archive.extractall(self.root)
+ shutil.move(os.path.join(self.root, TEST_ARCHIVE_TOP), directory)
+
+ def DownloadSinon(self):
+ directory = os.path.join(self.root, SINON_NAME)
+ if not os.path.exists(directory):
+ os.mkdir(directory)
+ path = os.path.join(directory, SINON_FILENAME)
+ if not os.path.exists(path):
+ urllib.urlretrieve(SINON_URL, path)
+ hash = hashlib.sha256()
+ with open(path, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), ''):
+ hash.update(chunk)
+ if hash.hexdigest() != SINON_HASH:
+ os.remove(path)
+ raise Exception('Hash mismatch of test data file')
+
+ def DownloadData(self):
+ self.DownloadTestData()
+ self.DownloadSinon()
+
+
+def GetSuite(name, root):
+ return PromiseAplusTestSuite(name, root)
diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README
index 680ab77d7..e975fbb43 100644
--- a/deps/v8/test/test262/README
+++ b/deps/v8/test/test262/README
@@ -2,13 +2,15 @@ This directory contains code for binding the test262 test suite
into the v8 test harness. To use the tests check out the test262
tests from
- http://hg.ecmascript.org/tests/test262
+ https://github.com/tc39/test262
-at revision 365 as 'data' in this directory. Using later version
-may be possible but the tests are only known to pass (and indeed run)
+at revision 365 (hash fbba29f) as 'data' in this directory. Using a later
+version may be possible, but the tests are only known to pass (and indeed run)
with that revision.
-hg clone -r 365 http://hg.ecmascript.org/tests/test262 data
+ git clone https://github.com/tc39/test262 data
+ cd data
+ git checkout fbba29f
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index b5bf2288f..b844bdca5 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -99,7 +99,7 @@
'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
-['arch == arm or arch == mipsel', {
+['arch == arm or arch == mipsel or arch == arm64', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
@@ -112,5 +112,5 @@
'S15.1.3.2_A2.5_T1': [SKIP],
'S15.1.3.3_A2.3_T1': [SKIP],
'S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel'
+}], # 'arch == arm or arch == mipsel or arch == arm64'
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 89f729d9a..8e129d314 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -28,6 +28,7 @@
import hashlib
import os
+import shutil
import sys
import tarfile
import urllib
@@ -36,9 +37,9 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-TEST_262_ARCHIVE_REVISION = "99aac3bc1cad" # This is the r365 revision.
-TEST_262_ARCHIVE_MD5 = "aadbd720ce9bdb4f8f3de066f4d7eea1"
-TEST_262_URL = "http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2"
+TEST_262_ARCHIVE_REVISION = "fbba29f" # This is the r365 revision.
+TEST_262_ARCHIVE_MD5 = "e1ff0db438cc12de8fb6da80621b4ef6"
+TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js", "testIntl.js"]
@@ -91,16 +92,18 @@ class Test262TestSuite(testsuite.TestSuite):
def DownloadData(self):
revision = TEST_262_ARCHIVE_REVISION
archive_url = TEST_262_URL % revision
- archive_name = os.path.join(self.root, "test262-%s.tar.bz2" % revision)
+ archive_name = os.path.join(self.root, "tc39-test262-%s.tar.gz" % revision)
directory_name = os.path.join(self.root, "data")
directory_old_name = os.path.join(self.root, "data.old")
if not os.path.exists(archive_name):
print "Downloading test data from %s ..." % archive_url
urllib.urlretrieve(archive_url, archive_name)
if os.path.exists(directory_name):
+ if os.path.exists(directory_old_name):
+ shutil.rmtree(directory_old_name)
os.rename(directory_name, directory_old_name)
if not os.path.exists(directory_name):
- print "Extracting test262-%s.tar.bz2 ..." % revision
+ print "Extracting test262-%s.tar.gz ..." % revision
md5 = hashlib.md5()
with open(archive_name, "rb") as f:
for chunk in iter(lambda: f.read(8192), ""):
@@ -108,13 +111,13 @@ class Test262TestSuite(testsuite.TestSuite):
if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
os.remove(archive_name)
raise Exception("Hash mismatch of test data file")
- archive = tarfile.open(archive_name, "r:bz2")
+ archive = tarfile.open(archive_name, "r:gz")
if sys.platform in ("win32", "cygwin"):
# Magic incantation to allow longer path names on Windows.
archive.extractall(u"\\\\?\\%s" % self.root)
else:
archive.extractall(self.root)
- os.rename(os.path.join(self.root, "test262-%s" % revision),
+ os.rename(os.path.join(self.root, "tc39-test262-%s" % revision),
directory_name)
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
index b8c4bec10..52babed02 100644
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
+++ b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
@@ -63,11 +63,11 @@ FAIL getSortedOwnPropertyNames(decodeURI) should be length,name. Was arguments,c
FAIL getSortedOwnPropertyNames(decodeURIComponent) should be length,name. Was arguments,caller,length,name.
FAIL getSortedOwnPropertyNames(encodeURI) should be length,name. Was arguments,caller,length,name.
FAIL getSortedOwnPropertyNames(encodeURIComponent) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(Object) should be create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf. Was arguments,caller,create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,is,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf.
+FAIL getSortedOwnPropertyNames(Object) should be create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf. Was arguments,caller,create,defineProperties,defineProperty,deliverChangeRecords,freeze,getNotifier,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,is,isExtensible,isFrozen,isSealed,keys,length,name,observe,preventExtensions,prototype,seal,setPrototypeOf,unobserve.
PASS getSortedOwnPropertyNames(Object.prototype) is ['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']
FAIL getSortedOwnPropertyNames(Function) should be length,name,prototype. Was arguments,caller,length,name,prototype.
FAIL getSortedOwnPropertyNames(Function.prototype) should be apply,bind,call,constructor,length,name,toString. Was apply,arguments,bind,call,caller,constructor,length,name,toString.
-FAIL getSortedOwnPropertyNames(Array) should be isArray,length,name,prototype. Was arguments,caller,isArray,length,name,prototype.
+FAIL getSortedOwnPropertyNames(Array) should be isArray,length,name,prototype. Was arguments,caller,isArray,length,name,observe,prototype,unobserve.
PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'every', 'filter', 'forEach', 'indexOf', 'join', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
FAIL getSortedOwnPropertyNames(String) should be fromCharCode,length,name,prototype. Was arguments,caller,fromCharCode,length,name,prototype.
PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
diff --git a/deps/v8/test/webkit/fast/js/Promise-already-rejected-expected.txt b/deps/v8/test/webkit/fast/js/Promise-already-rejected-expected.txt
new file mode 100644
index 000000000..ab0eec842
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-already-rejected-expected.txt
@@ -0,0 +1,9 @@
+Resolve or reject do not take effect on a rejected Promise.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is "foo"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-already-rejected.js b/deps/v8/test/webkit/fast/js/Promise-already-rejected.js
new file mode 100644
index 000000000..d1b5028d5
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-already-rejected.js
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Resolve or reject do not take effect on a rejected Promise.');
+
+var result;
+
+new Promise(function(resolve, reject) {
+ reject('foo');
+ resolve('resolve');
+ reject('reject');
+}).then(function() {
+ testFailed('fulfilled');
+ finishJSTest();
+}, function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'foo');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-already-resolved-expected.txt b/deps/v8/test/webkit/fast/js/Promise-already-resolved-expected.txt
new file mode 100644
index 000000000..a78ffd4ce
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-already-resolved-expected.txt
@@ -0,0 +1,9 @@
+Resolve or reject do not take effect on a resolved Promise.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is "foo"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-already-resolved.js b/deps/v8/test/webkit/fast/js/Promise-already-resolved.js
new file mode 100644
index 000000000..7a8640ae1
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-already-resolved.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Resolve or reject do not take effect on a resolved Promise.');
+
+var result;
+
+new Promise(function(resolve, reject) {
+ var anotherResolve;
+ resolve(new Promise(function(r) { anotherResolve = r; }));
+ resolve('resolve');
+ reject('reject');
+ anotherResolve('foo');
+}).then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'foo');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-catch-expected.txt b/deps/v8/test/webkit/fast/js/Promise-catch-expected.txt
new file mode 100644
index 000000000..7031d7705
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-catch-expected.txt
@@ -0,0 +1,15 @@
+Test Promise.prototype.catch.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS thisInInit is undefined
+PASS firstPromise instanceof Promise is true
+PASS secondPromise instanceof Promise is true
+PASS thisInOnFulfilled is undefined
+PASS result is "hello"
+PASS result is "bye"
+PASS fulfilled
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-catch.js b/deps/v8/test/webkit/fast/js/Promise-catch.js
new file mode 100644
index 000000000..88b053a53
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-catch.js
@@ -0,0 +1,71 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.prototype.catch.');
+
+var reject;
+var result;
+var thisInInit;
+var thisInOnFulfilled;
+
+var firstPromise = new Promise(function(_, newReject) {
+ thisInInit = this;
+ reject = newReject;
+});
+
+var secondPromise = firstPromise.catch(function(localResult) {
+ thisInOnFulfilled = this;
+ shouldBe('thisInOnFulfilled', 'undefined');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ return 'bye';
+});
+
+secondPromise.then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'bye');
+ testPassed('fulfilled');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+shouldBe('thisInInit', 'undefined');
+shouldBeTrue('firstPromise instanceof Promise');
+shouldBeTrue('secondPromise instanceof Promise');
+
+try {
+ firstPromise.catch(null);
+} catch (e) {
+ testFailed('catch(null) should not throw an exception');
+}
+try {
+ firstPromise.catch(37);
+} catch (e) {
+ testFailed('catch(37) should not throw an exception');
+}
+
+reject('hello');
diff --git a/deps/v8/test/webkit/fast/js/Promise-chained-then-expected.txt b/deps/v8/test/webkit/fast/js/Promise-chained-then-expected.txt
new file mode 100644
index 000000000..6d1150552
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-chained-then-expected.txt
@@ -0,0 +1,19 @@
+Test chained Promise.prototype.then.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+This should be the first debug output.
+PASS fulfilled
+PASS result is "hello"
+PASS fulfilled
+PASS result is "hello2"
+PASS rejected
+PASS result is "error"
+PASS rejected
+PASS result is "error2"
+PASS fulfilled
+PASS result is "recovered"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-chained-then.js b/deps/v8/test/webkit/fast/js/Promise-chained-then.js
new file mode 100644
index 000000000..b69e847d9
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-chained-then.js
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test chained Promise.prototype.then.');
+
+var resolve;
+var promise = new Promise(function (r) {resolve = r;});
+var result;
+
+promise.then(function(localResult) { // fulfilled - continue
+ testPassed('fulfilled');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ return 'hello2';
+}, function() {
+ testFailed('rejected');
+}).then() // pass through
+.then(function(localResult) { // fulfilled - throw an exception
+ testPassed('fulfilled');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello2');
+ throw 'error';
+}, function() {
+ testFailed('rejected');
+}).then(function() { // rejected - throw an exception
+ testFailed('fulfilled');
+}, function(localResult) {
+ testPassed('rejected');
+ result = localResult;
+ shouldBeEqualToString('result', 'error');
+ throw 'error2';
+}).then() // pass through
+.then(function() { // rejected - recover
+ testFailed('fulfilled');
+}, function(localResult) {
+ testPassed('rejected');
+ result = localResult;
+ shouldBeEqualToString('result', 'error2');
+ return 'recovered';
+}).then(function(localResult) { // fulfilled - the last
+ testPassed('fulfilled');
+ result = localResult;
+ shouldBeEqualToString('result', 'recovered');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+resolve('hello');
+debug('This should be the first debug output.');
diff --git a/deps/v8/test/webkit/fast/js/Promise-exception-expected.txt b/deps/v8/test/webkit/fast/js/Promise-exception-expected.txt
new file mode 100644
index 000000000..3b638c9e6
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-exception-expected.txt
@@ -0,0 +1,10 @@
+An exception thrown from an onFulfilled callback should reject the Promise.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS thisInThenCallback is undefined
+PASS result is "foobar"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-exception.js b/deps/v8/test/webkit/fast/js/Promise-exception.js
new file mode 100644
index 000000000..2bc3ca4b9
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-exception.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('An exception thrown from an onFulfilled callback should reject the Promise.');
+
+var thisInThenCallback;
+var result;
+
+new Promise(function(resolve) {
+ resolve('hello');
+}).then(function(result) {
+ throw 'foobar';
+}).then(function(localResult) {
+ testFailed('Unexpected invocation of onFulfilled');
+}, function(localResult) {
+ thisInThenCallback = this;
+ shouldBe('thisInThenCallback', 'undefined');
+ result = localResult;
+ shouldBeEqualToString('result', 'foobar');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver-expected.txt b/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver-expected.txt
new file mode 100644
index 000000000..d5b6e248e
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver-expected.txt
@@ -0,0 +1,9 @@
+|this| in Promise constructor should be undefined.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS receiverInStrict is undefined
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver.js b/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver.js
new file mode 100644
index 000000000..af06ad2e5
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-init-callback-receiver.js
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('|this| in Promise constructor should be undefined.');
+
+var receiverInStrict;
+new Promise(function () {
+ receiverInStrict = this;
+ shouldBe('receiverInStrict', 'undefined');
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-init-expected.txt b/deps/v8/test/webkit/fast/js/Promise-init-expected.txt
new file mode 100644
index 000000000..8db585254
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-init-expected.txt
@@ -0,0 +1,19 @@
+Test Promise construction.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS promise instanceof Promise is true
+PASS promise.constructor is Promise
+PASS thisInInit is undefined
+PASS resolve instanceof Function is true
+PASS reject instanceof Function is true
+PASS new Promise() threw exception TypeError: Promise resolver undefined is not a function.
+PASS new Promise(37) threw exception TypeError: Promise resolver 37 is not a function.
+PASS promise = new Promise(function() { throw Error("foo"); }) did not throw exception.
+PASS result.message is "foo"
+PASS fulfilled
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-init.js b/deps/v8/test/webkit/fast/js/Promise-init.js
new file mode 100644
index 000000000..d9efe4a5e
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-init.js
@@ -0,0 +1,70 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise construction.');
+
+var thisInInit;
+var resolve, reject;
+var result;
+var promise = new Promise(function(newResolve, newReject) {
+ thisInInit = this;
+ resolve = newResolve;
+ reject = newReject;
+});
+
+shouldBeTrue('promise instanceof Promise');
+shouldBe('promise.constructor', 'Promise');
+shouldBe('thisInInit', 'undefined');
+shouldBeTrue('resolve instanceof Function');
+shouldBeTrue('reject instanceof Function');
+
+shouldThrow('new Promise()', '"TypeError: Promise resolver undefined is not a function"');
+shouldThrow('new Promise(37)', '"TypeError: Promise resolver 37 is not a function"');
+
+try {
+ promise = new Promise(function() { throw Error('foo'); });
+ testPassed('promise = new Promise(function() { throw Error("foo"); }) did not throw exception.');
+} catch (e) {
+ testFailed('new Promise(function() { throw Error(\'foo\'); }) should not throw an exception.');
+}
+
+promise.then(undefined, function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result.message', 'foo');
+});
+
+new Promise(function(resolve) {
+ resolve("hello");
+ throw Error("foo");
+}).then(function(localResult) {
+ result = localResult;
+ testPassed('fulfilled');
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+}, function(localResult) {
+ result = localResult;
+ testFailed('rejected');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep-expected.txt b/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep-expected.txt
new file mode 100644
index 000000000..158f9f146
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep-expected.txt
@@ -0,0 +1,10 @@
+Test whether deeply chained then-s work.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS result is 5042
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep.js b/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep.js
new file mode 100644
index 000000000..9e19aa56b
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-onFulfilled-deep.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether deeply chained then-s work.');
+
+var result;
+var resolve;
+var promise = new Promise(function (r) { resolve = r; });
+
+for (var i = 0; i < 5000; ++i) {
+ promise = promise.then(function (value) { return value + 1; }, function () { testFailed('rejected'); });
+}
+
+promise.then(function (value) {
+ result = value;
+ shouldBe('result', '5042');
+}).then(finishJSTest, finishJSTest);
+
+shouldBe('result', 'undefined');
+resolve(42);
diff --git a/deps/v8/test/webkit/fast/js/Promise-onRejected-deep-expected.txt b/deps/v8/test/webkit/fast/js/Promise-onRejected-deep-expected.txt
new file mode 100644
index 000000000..158f9f146
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-onRejected-deep-expected.txt
@@ -0,0 +1,10 @@
+Test whether deeply chained then-s work.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS result is 5042
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-onRejected-deep.js b/deps/v8/test/webkit/fast/js/Promise-onRejected-deep.js
new file mode 100644
index 000000000..b9c281fba
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-onRejected-deep.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether deeply chained then-s work.');
+
+var result;
+var reject;
+var promise = new Promise(function (_, r) { reject = r; });
+
+for (var i = 0; i < 5000; ++i) {
+ promise = promise.then(function (value) { testFailed('fulfilled'); throw value + 1; }, function (value) { throw value + 1; });
+}
+
+promise.catch(function (value) {
+ result = value;
+ shouldBe('result', '5042');
+}).then(finishJSTest, finishJSTest);
+
+shouldBe('result', 'undefined');
+reject(42);
diff --git a/deps/v8/test/webkit/fast/js/Promise-reject-expected.txt b/deps/v8/test/webkit/fast/js/Promise-reject-expected.txt
new file mode 100644
index 000000000..e9949852d
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-reject-expected.txt
@@ -0,0 +1,12 @@
+Test Promise rejection.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS promiseState is "pending"
+PASS promiseState is "pending"
+PASS promiseState is "rejected"
+PASS promiseResult is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-reject.js b/deps/v8/test/webkit/fast/js/Promise-reject.js
new file mode 100644
index 000000000..82413d598
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-reject.js
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise rejection.');
+
+var reject;
+var promise = new Promise(function(_, r) { reject = r; });
+var promiseState = 'pending';
+var promiseResult = undefined;
+promise.then(function(result) {
+ promiseState = 'fulfilled';
+ promiseResult = result;
+}, function(result) {
+ promiseState = 'rejected';
+ promiseResult = result;
+});
+
+shouldBeEqualToString('promiseState', 'pending');
+
+reject('hello');
+
+shouldBeEqualToString('promiseState', 'pending');
+
+promise.then(function() {
+ testFailed('fulfilled.');
+ finishJSTest();
+}, function() {
+ shouldBeEqualToString('promiseState', 'rejected');
+ shouldBeEqualToString('promiseResult', 'hello');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-chain-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-chain-expected.txt
new file mode 100644
index 000000000..2a3062ee1
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-chain-expected.txt
@@ -0,0 +1,10 @@
+Test chained Promise resolutions.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is "hello"
+PASS result is "bye"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-chain.js b/deps/v8/test/webkit/fast/js/Promise-resolve-chain.js
new file mode 100644
index 000000000..558bf53ee
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-chain.js
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test chained Promise resolutions.');
+
+var resolve1, resolve2, resolve3;
+var reject4, resolve5, resolve6;
+var result;
+var promise1 = new Promise(function(r) { resolve1 = r; });
+var promise2 = new Promise(function(r) { resolve2 = r; });
+var promise3 = new Promise(function(r) { resolve3 = r; });
+var promise4 = new Promise(function(_, r) { reject4 = r; });
+var promise5 = new Promise(function(r) { resolve5 = r; });
+var promise6 = new Promise(function(r) { resolve6 = r; });
+
+resolve3(promise2);
+resolve2(promise1);
+resolve6(promise5);
+resolve5(promise4);
+
+promise3.then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+}, function() {
+ testFailed('rejected');
+});
+
+promise6.then(function() {
+ testFailed('fulfilled');
+ finishJSTest();
+}, function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'bye');
+ finishJSTest();
+});
+
+resolve1('hello');
+reject4('bye');
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-expected.txt
new file mode 100644
index 000000000..fcf5da88c
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-expected.txt
@@ -0,0 +1,10 @@
+Test Promise resolution.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS thisInOnFulfilled is undefined
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-state-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-state-expected.txt
new file mode 100644
index 000000000..58d20c249
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-state-expected.txt
@@ -0,0 +1,12 @@
+Test whether Promise processes microtasks in the correct order.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS promiseState is "pending"
+PASS promiseState is "pending"
+PASS promiseState is "fulfilled"
+PASS promiseResult is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-state.js b/deps/v8/test/webkit/fast/js/Promise-resolve-state.js
new file mode 100644
index 000000000..165d1149b
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-state.js
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether Promise processes microtasks in the correct order.');
+
+var resolve;
+var promise = new Promise(function(r) { resolve = r; });
+var promiseState = 'pending';
+var promiseResult = undefined;
+promise.then(function(result) {
+ promiseState = 'fulfilled';
+ promiseResult = result;
+}, function(result) {
+ promiseState = 'rejected';
+ promiseResult = result;
+});
+
+shouldBeEqualToString('promiseState', 'pending');
+
+resolve('hello');
+
+shouldBeEqualToString('promiseState', 'pending');
+
+promise.then(function() {
+ shouldBeEqualToString('promiseState', 'fulfilled');
+ shouldBeEqualToString('promiseResult', 'hello');
+ finishJSTest();
+}, function() {
+ testFailed('promise is rejected.');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself-expected.txt
new file mode 100644
index 000000000..b7fdaef0c
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself-expected.txt
@@ -0,0 +1,10 @@
+Test whether Promise will be rejected if it is resolved with itself.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS rejected
+PASS result is "TypeError: Chaining cycle detected for promise #<Promise>"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself.js b/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself.js
new file mode 100644
index 000000000..22d05cd29
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-itself.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether Promise will be rejected if it is resolved with itself.');
+
+var resolve;
+var result;
+
+var promise = new Promise(function(r) { resolve = r; });
+promise.then(function () {
+ testFailed('fulfilled');
+}, function (error) {
+ testPassed('rejected');
+ result = error.toString();
+ shouldBeEqualToString('result', 'TypeError: Chaining cycle detected for promise #<Promise>');
+}).then(finishJSTest, finishJSTest);
+
+resolve(promise);
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt
new file mode 100644
index 000000000..7c2a301dc
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception-expected.txt
@@ -0,0 +1,11 @@
+Test whether Promise treats thenable correctly.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+The promise is already rejected now.
+PASS rejected
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception.js b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception.js
new file mode 100644
index 000000000..2604cb6e3
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-exception.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether Promise treats thenable correctly.');
+
+var callback;
+var result;
+
+new Promise(function(resolve) {
+ resolve({then: function() { throw 'hello'; }});
+}).then(function() {
+ testFailed('fulfilled');
+ finishJSTest();
+}, function(localResult) {
+ testPassed('rejected');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+});
+
+debug('The promise is already rejected now.');
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill-expected.txt
new file mode 100644
index 000000000..03447532a
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill-expected.txt
@@ -0,0 +1,13 @@
+Test whether Promise treats thenable correctly.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+The promise is not fulfilled now.
+PASS value.then is called.
+PASS thisValue is value
+PASS fulfilled
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill.js b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill.js
new file mode 100644
index 000000000..b627286b5
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-fulfill.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether Promise treats thenable correctly.');
+
+var thisValue;
+var result;
+var value = {
+ then: function(onFulfilled) {
+ testPassed('value.then is called.');
+ thisValue = this;
+ shouldBe('thisValue', 'value');
+ onFulfilled('hello');
+ }
+};
+
+new Promise(function(resolve) {
+ resolve(value);
+}).then(function(localResult) {
+ testPassed('fulfilled');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+debug('The promise is not fulfilled now.');
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject-expected.txt b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject-expected.txt
new file mode 100644
index 000000000..d0fd234e4
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject-expected.txt
@@ -0,0 +1,13 @@
+Test whether Promise treats thenable correctly.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+The promise is not rejected now.
+PASS value.then is called.
+PASS thisValue is value
+PASS rejected
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject.js b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject.js
new file mode 100644
index 000000000..1923a8de1
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve-with-then-reject.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether Promise treats thenable correctly.');
+
+var thisValue;
+var result;
+var value = {
+ then: function(onFulfilled, onRejected) {
+ testPassed('value.then is called.');
+ thisValue = this;
+ shouldBe('thisValue', 'value');
+ onRejected('hello');
+ }
+};
+
+new Promise(function(resolve) {
+ resolve(value);
+}).then(function() {
+ testFailed('fulfilled');
+ finishJSTest();
+}, function(localResult) {
+ testPassed('rejected');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+});
+
+debug('The promise is not rejected now.');
diff --git a/deps/v8/test/webkit/fast/js/Promise-resolve.js b/deps/v8/test/webkit/fast/js/Promise-resolve.js
new file mode 100644
index 000000000..40f313d76
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-resolve.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise resolution.');
+
+var thisInOnFulfilled;
+var result;
+
+new Promise(function(resolve) {
+ resolve('hello');
+}).then(function(localResult) {
+ thisInOnFulfilled = this;
+ shouldBe('thisInOnFulfilled', 'undefined');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-simple-expected.txt b/deps/v8/test/webkit/fast/js/Promise-simple-expected.txt
new file mode 100644
index 000000000..122a8f4da
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-simple-expected.txt
@@ -0,0 +1,11 @@
+Test Promise.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS thisInInit is undefined
+PASS thisInOnFulfilled is undefined
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-simple.js b/deps/v8/test/webkit/fast/js/Promise-simple.js
new file mode 100644
index 000000000..047f42002
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-simple.js
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.');
+
+var resolve;
+var thisInInit;
+var thisInOnFulfilled;
+var result;
+
+new Promise(function(newResolve) {
+ thisInInit = this;
+ resolve = newResolve;
+}).then(function(localResult) {
+ thisInOnFulfilled = this;
+ shouldBe('thisInOnFulfilled', 'undefined');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+});
+
+shouldBe('thisInInit', 'undefined');
+
+resolve('hello');
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-all-expected.txt b/deps/v8/test/webkit/fast/js/Promise-static-all-expected.txt
new file mode 100644
index 000000000..7adfcccc7
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-all-expected.txt
@@ -0,0 +1,32 @@
+Test Promise.all
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS Promise.all() is rejected.
+PASS Promise.all([]) is fulfilled.
+PASS result.length is 0
+PASS Promise.all([p1, p2, p3]) is fulfilled.
+PASS result.length is 3
+PASS result[0] is "p1"
+PASS result[1] is "p2"
+PASS result[2] is "p3"
+PASS Promise.all([p1, p6, p5]) is rejected.
+PASS result is "p6"
+PASS Promise.all([p9]) is fulfilled.
+PASS result.length is 1
+PASS result[0] is "p2"
+PASS Promise.all([p9,,,]) is fulfilled.
+PASS result.length is 3
+PASS result[0] is "p2"
+PASS result[1] is undefined
+PASS result[2] is undefined
+PASS Promise.all([p9,42]) is fulfilled.
+PASS result.length is 2
+PASS result[0] is "p2"
+PASS result[1] is 42
+PASS Promise.all({}) is rejected.
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-all.js b/deps/v8/test/webkit/fast/js/Promise-static-all.js
new file mode 100644
index 000000000..4827c658c
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-all.js
@@ -0,0 +1,117 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.all');
+
+var result = undefined;
+
+var p1 = new Promise(function(resolve) { resolve('p1'); });
+var p2 = new Promise(function(resolve) { resolve('p2'); });
+var p3 = new Promise(function(resolve) { resolve('p3'); });
+var p4 = new Promise(function() {});
+var p5 = new Promise(function() {});
+var p6 = new Promise(function(_, reject) { reject('p6'); });
+var p7 = new Promise(function(_, reject) { reject('p7'); });
+var p8 = new Promise(function(_, reject) { reject('p8'); });
+var p9 = new Promise(function(resolve) { resolve(p2); });
+
+Promise.all([p1, p2, p5]).then(function(result) {
+ testFailed('Promise.all([p1, p2, p5]) is fulfilled.');
+}, function() {
+ testFailed('Promise.all([p1, p2, p5]) is rejected.');
+});
+
+Promise.all().then(function() {
+ testFailed('Promise.all() is fulfilled.');
+}, function() {
+ testPassed('Promise.all() is rejected.');
+ return Promise.all([]).then(function(localResult) {
+ testPassed('Promise.all([]) is fulfilled.');
+ result = localResult;
+ shouldBe('result.length', '0');
+ }, function() {
+ testFailed('Promise.all([]) is rejected.');
+ });
+}).then(function() {
+ return Promise.all([p1, p2, p3]).then(function(localResult) {
+ testPassed('Promise.all([p1, p2, p3]) is fulfilled.');
+ result = localResult;
+ shouldBe('result.length', '3');
+ shouldBeEqualToString('result[0]', 'p1');
+ shouldBeEqualToString('result[1]', 'p2');
+ shouldBeEqualToString('result[2]', 'p3');
+ }, function() {
+ testFailed('Promise.all([p1, p2, p3]) is rejected.');
+ });
+}).then(function() {
+ return Promise.all([p1, p6, p5]).then(function(localResult) {
+ testFailed('Promise.all([p1, p6, p5]) is fulfilled.');
+ }, function(localResult) {
+ testPassed('Promise.all([p1, p6, p5]) is rejected.');
+ result = localResult;
+ shouldBeEqualToString('result', 'p6');
+ });
+}).then(function() {
+ return Promise.all([p9]).then(function(localResult) {
+ testPassed('Promise.all([p9]) is fulfilled.');
+ result = localResult;
+ shouldBe('result.length', '1');
+ shouldBeEqualToString('result[0]', 'p2');
+ }, function(result) {
+ testFailed('Promise.all([p9]) is rejected.');
+ });
+}).then(function() {
+ // Array hole should not be skipped.
+ return Promise.all([p9,,,]).then(function(localResult) {
+ testPassed('Promise.all([p9,,,]) is fulfilled.');
+ result = localResult;
+ shouldBe('result.length', '3');
+ shouldBeEqualToString('result[0]', 'p2');
+ shouldBe('result[1]', 'undefined');
+ shouldBe('result[2]', 'undefined');
+ }, function(localResult) {
+ testFailed('Promise.all([p9,,,]) is rejected.');
+ });
+}).then(function() {
+ // Immediate value should be converted to a promise object by the
+ // ToPromise operation.
+ return Promise.all([p9,42]).then(function(localResult) {
+ testPassed('Promise.all([p9,42]) is fulfilled.');
+ result = localResult;
+ shouldBe('result.length', '2');
+ shouldBeEqualToString('result[0]', 'p2');
+ shouldBe('result[1]', '42');
+ }, function(localResult) {
+ testFailed('Promise.all([p9,42]) is rejected.');
+ });
+}).then(function() {
+ return Promise.all({}).then(function(localResult) {
+ testFailed('Promise.all({}) is fulfilled.');
+ }, function(localResult) {
+ testPassed('Promise.all({}) is rejected.');
+ });
+}).then(finishJSTest, finishJSTest);
+
+shouldBe('result', 'undefined');
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-cast-expected.txt b/deps/v8/test/webkit/fast/js/Promise-static-cast-expected.txt
new file mode 100644
index 000000000..2fc506977
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-cast-expected.txt
@@ -0,0 +1,14 @@
+Test Promise.resolve as cast
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS promise is value
+PASS result is undefined
+PASS result2 is undefined
+PASS result is "hello"
+PASS result2 is 42
+PASS fulfilled
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-cast.js b/deps/v8/test/webkit/fast/js/Promise-static-cast.js
new file mode 100644
index 000000000..2c1eb2f72
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-cast.js
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.resolve as cast');
+
+var result = undefined;
+var result2 = undefined;
+
+var resolve;
+var value = new Promise(function (r) { resolve = r;} );
+var promise = Promise.resolve(value);
+
+// If [[IsPromise]] is true, Promise.resolve simply returns its argument.
+shouldBe('promise', 'value');
+
+promise.then(function(res) {
+ result = res;
+ shouldBeEqualToString('result', 'hello');
+
+ return Promise.resolve(42).then(function (res) {
+ result2 = res;
+ shouldBe('result2', '42');
+ });
+}).then(function () {
+ testPassed('fulfilled');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+resolve('hello');
+shouldBe('result', 'undefined');
+shouldBe('result2', 'undefined');
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-race-expected.txt b/deps/v8/test/webkit/fast/js/Promise-static-race-expected.txt
new file mode 100644
index 000000000..ea06f3868
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-race-expected.txt
@@ -0,0 +1,21 @@
+Test Promise.race
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS Promise.race() is rejected.
+PASS Promise.race({}) is rejected.
+PASS Promise.race([p4, p1, p6]) is fulfilled.
+PASS result is "p1"
+PASS Promise.race([p4, p6, p1]) is rejected.
+PASS result is "p6"
+PASS Promise.race([p9]) is fulfilled.
+PASS result is "p2"
+PASS Promise.race([p4,,]) is fulfilled.
+PASS result is undefined
+PASS Promise.race([p4,42]) is fulfilled.
+PASS result is 42
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-race.js b/deps/v8/test/webkit/fast/js/Promise-static-race.js
new file mode 100644
index 000000000..b95473ce0
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-race.js
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.race');
+
+var result;
+
+var p1 = new Promise(function(resolve) { resolve('p1'); });
+var p2 = new Promise(function(resolve) { resolve('p2'); });
+var p3 = new Promise(function(resolve) { resolve('p3'); });
+var p4 = new Promise(function() {});
+var p5 = new Promise(function() {});
+var p6 = new Promise(function(_, reject) { reject('p6'); });
+var p7 = new Promise(function(_, reject) { reject('p7'); });
+var p8 = new Promise(function(_, reject) { reject('p8'); });
+var p9 = new Promise(function(resolve) { resolve(p2); });
+
+Promise.race([p4, p5]).then(function(localResult) {
+ testFailed('Promise.race([p4, p5]) is fulfilled.');
+}, function() {
+ testFailed('Promise.race([p4, p5]) is rejected.');
+});
+
+// If the argument is an empty array, the result promise won't be fulfilled.
+Promise.race([]).then(function(localResult) {
+ testFailed('Promise.race([]) is fulfilled.');
+}, function() {
+ testFailed('Promise.race([]) is rejected.');
+});
+
+Promise.race().then(function(localResult) {
+ testFailed('Promise.race() is fulfilled.');
+}, function() {
+ testPassed('Promise.race() is rejected.');
+}).then(function() {
+ return Promise.race({}).then(function(localResult) {
+ testFailed('Promise.race({}) is fulfilled.');
+ }, function() {
+ testPassed('Promise.race({}) is rejected.');
+ });
+}).then(function() {
+ return Promise.race([p4, p1, p6]).then(function(localResult) {
+ testPassed('Promise.race([p4, p1, p6]) is fulfilled.');
+ result = localResult;
+ shouldBeEqualToString('result', 'p1');
+ }, function() {
+ testFailed('Promise.race([p4, p1, p6]) is rejected.');
+ });
+}).then(function() {
+ return Promise.race([p4, p6, p1]).then(function(localResult) {
+ testFailed('Promise.race([p4, p6, p1]) is fulfilled.');
+ }, function(localResult) {
+ testPassed('Promise.race([p4, p6, p1]) is rejected.');
+ result = localResult;
+ shouldBeEqualToString('result', 'p6');
+ });
+}).then(function() {
+ return Promise.race([p9]).then(function(localResult) {
+ testPassed('Promise.race([p9]) is fulfilled.');
+ result = localResult;
+ shouldBeEqualToString('result', 'p2');
+ }, function() {
+ testFailed('Promise.race([p9]) is rejected.');
+ });
+}).then(function() {
+ // Array hole should not be skipped.
+ return Promise.race([p4,,]).then(function(localResult) {
+ testPassed('Promise.race([p4,,]) is fulfilled.');
+ result = localResult;
+ shouldBe('result', 'undefined');
+ }, function() {
+ testFailed('Promise.race([p4,,]) is rejected.');
+ });
+}).then(function() {
+ // Immediate value should be converted to a promise object by the
+ // ToPromise operation.
+ return Promise.race([p4,42]).then(function(localResult) {
+ testPassed('Promise.race([p4,42]) is fulfilled.');
+ result = localResult;
+ shouldBe('result', '42');
+ }, function() {
+ testFailed('Promise.race([p4,42]) is rejected.');
+ });
+}).then(finishJSTest, finishJSTest);
+
+shouldBe('result', 'undefined');
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-reject-expected.txt b/deps/v8/test/webkit/fast/js/Promise-static-reject-expected.txt
new file mode 100644
index 000000000..b1ec5f16d
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-reject-expected.txt
@@ -0,0 +1,10 @@
+Test Promise.reject
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-reject.js b/deps/v8/test/webkit/fast/js/Promise-static-reject.js
new file mode 100644
index 000000000..5f09ca3e9
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-reject.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.reject');
+
+var result = undefined;
+
+Promise.reject('hello').then(function(result) {
+ testFailed('fulfilled');
+ finishJSTest();
+}, function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+});
+shouldBe('result', 'undefined');
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-resolve-expected.txt b/deps/v8/test/webkit/fast/js/Promise-static-resolve-expected.txt
new file mode 100644
index 000000000..40c267e69
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-resolve-expected.txt
@@ -0,0 +1,10 @@
+Test Promise.resolve
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is undefined
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-static-resolve.js b/deps/v8/test/webkit/fast/js/Promise-static-resolve.js
new file mode 100644
index 000000000..c5e4b626b
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-static-resolve.js
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.resolve');
+
+var result = undefined;
+
+var resolve;
+var promise = Promise.resolve(new Promise(function (r) { resolve = r;} ));
+
+promise.then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+resolve('hello');
+shouldBe('result', 'undefined');
diff --git a/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver-expected.txt b/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver-expected.txt
new file mode 100644
index 000000000..6ac19ff03
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver-expected.txt
@@ -0,0 +1,12 @@
+Test whether then callback receivers are correctly set.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS fulfilled
+PASS thisInOnFulfilled is undefined
+PASS rejected
+PASS thisInOnRejected is undefined
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver.js b/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver.js
new file mode 100644
index 000000000..e34bdaeb5
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then-callback-receiver.js
@@ -0,0 +1,47 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test whether then callback receivers are correctly set.');
+
+var thisInOnFulfilled;
+var thisInOnRejected;
+
+Promise.resolve().then(function () {
+ return Promise.resolve(42).then(function () {
+ testPassed('fulfilled');
+ thisInOnFulfilled = this;
+ shouldBe('thisInOnFulfilled', 'undefined');
+ }, function () {
+ testFailed('rejected');
+ });
+}).then(function () {
+ return Promise.reject(42).then(function () {
+ testFailed('fulfilled');
+ }, function () {
+ testPassed('rejected');
+ thisInOnRejected = this;
+ shouldBe('thisInOnRejected', 'undefined');
+ });
+}).then(finishJSTest, finishJSTest);
diff --git a/deps/v8/test/webkit/fast/js/Promise-then-expected.txt b/deps/v8/test/webkit/fast/js/Promise-then-expected.txt
new file mode 100644
index 000000000..d9708075c
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then-expected.txt
@@ -0,0 +1,17 @@
+Test Promise.prototype.then
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS thisInInit is undefined
+PASS firstPromise instanceof Promise is true
+PASS secondPromise instanceof Promise is true
+PASS thisInOnFulfilled is undefined
+PASS result is "hello"
+PASS result is "world"
+PASS rejected
+PASS result is "exception"
+PASS resolved
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks-expected.txt b/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks-expected.txt
new file mode 100644
index 000000000..1e4c8ee95
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks-expected.txt
@@ -0,0 +1,9 @@
+Promise.prototype.then should work without callbacks.
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+PASS result is "hello"
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks.js b/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks.js
new file mode 100644
index 000000000..27ab4234a
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then-without-callbacks.js
@@ -0,0 +1,35 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Promise.prototype.then should work without callbacks.');
+
+var result;
+new Promise(function(resolve) { resolve('hello'); }).then(
+ // then without callbacks
+).then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ finishJSTest();
+});
diff --git a/deps/v8/test/webkit/fast/js/Promise-then.js b/deps/v8/test/webkit/fast/js/Promise-then.js
new file mode 100644
index 000000000..b9cc9887b
--- /dev/null
+++ b/deps/v8/test/webkit/fast/js/Promise-then.js
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony
+'use strict';
+description('Test Promise.prototype.then');
+
+var resolve;
+var result;
+var thisInOnFulfilled;
+var thisInInit;
+
+var firstPromise = new Promise(function(newResolve) {
+ thisInInit = this;
+ resolve = newResolve;
+});
+
+var secondPromise = firstPromise.then(function(localResult) {
+ thisInOnFulfilled = this;
+ shouldBe('thisInOnFulfilled', 'undefined');
+ result = localResult;
+ shouldBeEqualToString('result', 'hello');
+ return 'world';
+});
+
+shouldBe('thisInInit', 'undefined');
+shouldBeTrue('firstPromise instanceof Promise');
+shouldBeTrue('secondPromise instanceof Promise');
+
+secondPromise.then(undefined, 37).then(function(localResult) {
+ result = localResult;
+ shouldBeEqualToString('result', 'world');
+ throw 'exception';
+}).then(1, 2).then(function() {
+ testFailed('resolved');
+}, function(localResult) {
+ testPassed('rejected');
+ result = localResult;
+ shouldBeEqualToString('result', 'exception');
+}).then(function() {
+ testPassed('resolved');
+ finishJSTest();
+}, function() {
+ testFailed('rejected');
+ finishJSTest();
+});
+
+resolve('hello');
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index 07ecf99ce..45f71bfa6 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -152,21 +152,21 @@ PASS 'use strict'; function f() { arguments-- } threw exception SyntaxError: Une
PASS (function(){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS global.eval('"use strict"; if (0) ++arguments; true;') threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; ++(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-FAIL (function(){'use strict'; ++(1, eval)}) should throw an exception. Was function (){'use strict'; ++(1, eval)}.
+PASS (function(){'use strict'; ++(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; (1, eval)++ threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-FAIL (function(){'use strict'; (1, eval)++}) should throw an exception. Was function (){'use strict'; (1, eval)++}.
+PASS (function(){'use strict'; (1, eval)++}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 'use strict'; --(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-FAIL (function(){'use strict'; --(1, eval)}) should throw an exception. Was function (){'use strict'; --(1, eval)}.
+PASS (function(){'use strict'; --(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; (1, eval)-- threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-FAIL (function(){'use strict'; (1, eval)--}) should throw an exception. Was function (){'use strict'; (1, eval)--}.
-FAIL 'use strict'; function f() { ++(1, arguments) } should throw an exception. Was use strict.
-FAIL (function(){'use strict'; function f() { ++(1, arguments) }}) should throw an exception. Was function (){'use strict'; function f() { ++(1, arguments) }}.
-FAIL 'use strict'; function f() { (1, arguments)++ } should throw an exception. Was use strict.
-FAIL (function(){'use strict'; function f() { (1, arguments)++ }}) should throw an exception. Was function (){'use strict'; function f() { (1, arguments)++ }}.
-FAIL 'use strict'; function f() { --(1, arguments) } should throw an exception. Was use strict.
-FAIL (function(){'use strict'; function f() { --(1, arguments) }}) should throw an exception. Was function (){'use strict'; function f() { --(1, arguments) }}.
-FAIL 'use strict'; function f() { (1, arguments)-- } should throw an exception. Was use strict.
-FAIL (function(){'use strict'; function f() { (1, arguments)-- }}) should throw an exception. Was function (){'use strict'; function f() { (1, arguments)-- }}.
+PASS (function(){'use strict'; (1, eval)--}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; function f() { ++(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function(){'use strict'; function f() { ++(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; function f() { (1, arguments)++ } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function(){'use strict'; function f() { (1, arguments)++ }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; function f() { --(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function(){'use strict'; function f() { --(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; function f() { (1, arguments)-- } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function(){'use strict'; function f() { (1, arguments)-- }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
FAIL 'use strict'; if (0) delete +a.b should throw an exception. Was use strict.
FAIL (function(){'use strict'; if (0) delete +a.b}) should throw an exception. Was function (){'use strict'; if (0) delete +a.b}.
FAIL 'use strict'; if (0) delete ++a.b should throw an exception. Was use strict.
diff --git a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
index 943db9778..5364f846e 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
@@ -234,145 +234,145 @@ PASS compileAndSerialize('a || b || c') is 'a || b || c'
PASS compileAndSerialize('(a || b) || c') is '(a || b) || c'
PASS compileAndSerialize('a || (b || c)') is 'a || (b || c)'
PASS compileAndSerialize('a = b = c') is 'a = b = c'
-PASS compileAndSerialize('(a = b) = c') is '(a = b) = c'
+FAIL compileAndSerialize('(a = b) = c') should be (a = b) = c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b = c)') is 'a = (b = c)'
PASS compileAndSerialize('a = b + c') is 'a = b + c'
PASS compileAndSerialize('(a = b) + c') is '(a = b) + c'
PASS compileAndSerialize('a = (b + c)') is 'a = (b + c)'
-FAIL compileAndSerialize('a + b = c') should throw an exception. Was a + b = c.
-PASS compileAndSerialize('(a + b) = c') is '(a + b) = c'
+PASS compileAndSerialize('a + b = c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) = c') should be (a + b) = c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b = c)') is 'a + (b = c)'
PASS compileAndSerialize('a *= b *= c') is 'a *= b *= c'
-PASS compileAndSerialize('(a *= b) *= c') is '(a *= b) *= c'
+FAIL compileAndSerialize('(a *= b) *= c') should be (a *= b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a *= (b *= c)') is 'a *= (b *= c)'
PASS compileAndSerialize('a = b *= c') is 'a = b *= c'
-PASS compileAndSerialize('(a = b) *= c') is '(a = b) *= c'
+FAIL compileAndSerialize('(a = b) *= c') should be (a = b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b *= c)') is 'a = (b *= c)'
PASS compileAndSerialize('a *= b + c') is 'a *= b + c'
PASS compileAndSerialize('(a *= b) + c') is '(a *= b) + c'
PASS compileAndSerialize('a *= (b + c)') is 'a *= (b + c)'
-FAIL compileAndSerialize('a + b *= c') should throw an exception. Was a + b *= c.
-PASS compileAndSerialize('(a + b) *= c') is '(a + b) *= c'
+PASS compileAndSerialize('a + b *= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) *= c') should be (a + b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b *= c)') is 'a + (b *= c)'
PASS compileAndSerialize('a /= b /= c') is 'a /= b /= c'
-PASS compileAndSerialize('(a /= b) /= c') is '(a /= b) /= c'
+FAIL compileAndSerialize('(a /= b) /= c') should be (a /= b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a /= (b /= c)') is 'a /= (b /= c)'
PASS compileAndSerialize('a = b /= c') is 'a = b /= c'
-PASS compileAndSerialize('(a = b) /= c') is '(a = b) /= c'
+FAIL compileAndSerialize('(a = b) /= c') should be (a = b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b /= c)') is 'a = (b /= c)'
PASS compileAndSerialize('a /= b + c') is 'a /= b + c'
PASS compileAndSerialize('(a /= b) + c') is '(a /= b) + c'
PASS compileAndSerialize('a /= (b + c)') is 'a /= (b + c)'
-FAIL compileAndSerialize('a + b /= c') should throw an exception. Was a + b /= c.
-PASS compileAndSerialize('(a + b) /= c') is '(a + b) /= c'
+PASS compileAndSerialize('a + b /= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) /= c') should be (a + b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b /= c)') is 'a + (b /= c)'
PASS compileAndSerialize('a %= b %= c') is 'a %= b %= c'
-PASS compileAndSerialize('(a %= b) %= c') is '(a %= b) %= c'
+FAIL compileAndSerialize('(a %= b) %= c') should be (a %= b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a %= (b %= c)') is 'a %= (b %= c)'
PASS compileAndSerialize('a = b %= c') is 'a = b %= c'
-PASS compileAndSerialize('(a = b) %= c') is '(a = b) %= c'
+FAIL compileAndSerialize('(a = b) %= c') should be (a = b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b %= c)') is 'a = (b %= c)'
PASS compileAndSerialize('a %= b + c') is 'a %= b + c'
PASS compileAndSerialize('(a %= b) + c') is '(a %= b) + c'
PASS compileAndSerialize('a %= (b + c)') is 'a %= (b + c)'
-FAIL compileAndSerialize('a + b %= c') should throw an exception. Was a + b %= c.
-PASS compileAndSerialize('(a + b) %= c') is '(a + b) %= c'
+PASS compileAndSerialize('a + b %= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) %= c') should be (a + b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b %= c)') is 'a + (b %= c)'
PASS compileAndSerialize('a += b += c') is 'a += b += c'
-PASS compileAndSerialize('(a += b) += c') is '(a += b) += c'
+FAIL compileAndSerialize('(a += b) += c') should be (a += b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a += (b += c)') is 'a += (b += c)'
PASS compileAndSerialize('a = b += c') is 'a = b += c'
-PASS compileAndSerialize('(a = b) += c') is '(a = b) += c'
+FAIL compileAndSerialize('(a = b) += c') should be (a = b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b += c)') is 'a = (b += c)'
PASS compileAndSerialize('a += b + c') is 'a += b + c'
PASS compileAndSerialize('(a += b) + c') is '(a += b) + c'
PASS compileAndSerialize('a += (b + c)') is 'a += (b + c)'
-FAIL compileAndSerialize('a + b += c') should throw an exception. Was a + b += c.
-PASS compileAndSerialize('(a + b) += c') is '(a + b) += c'
+PASS compileAndSerialize('a + b += c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) += c') should be (a + b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b += c)') is 'a + (b += c)'
PASS compileAndSerialize('a -= b -= c') is 'a -= b -= c'
-PASS compileAndSerialize('(a -= b) -= c') is '(a -= b) -= c'
+FAIL compileAndSerialize('(a -= b) -= c') should be (a -= b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a -= (b -= c)') is 'a -= (b -= c)'
PASS compileAndSerialize('a = b -= c') is 'a = b -= c'
-PASS compileAndSerialize('(a = b) -= c') is '(a = b) -= c'
+FAIL compileAndSerialize('(a = b) -= c') should be (a = b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b -= c)') is 'a = (b -= c)'
PASS compileAndSerialize('a -= b + c') is 'a -= b + c'
PASS compileAndSerialize('(a -= b) + c') is '(a -= b) + c'
PASS compileAndSerialize('a -= (b + c)') is 'a -= (b + c)'
-FAIL compileAndSerialize('a + b -= c') should throw an exception. Was a + b -= c.
-PASS compileAndSerialize('(a + b) -= c') is '(a + b) -= c'
+PASS compileAndSerialize('a + b -= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) -= c') should be (a + b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b -= c)') is 'a + (b -= c)'
PASS compileAndSerialize('a <<= b <<= c') is 'a <<= b <<= c'
-PASS compileAndSerialize('(a <<= b) <<= c') is '(a <<= b) <<= c'
+FAIL compileAndSerialize('(a <<= b) <<= c') should be (a <<= b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a <<= (b <<= c)') is 'a <<= (b <<= c)'
PASS compileAndSerialize('a = b <<= c') is 'a = b <<= c'
-PASS compileAndSerialize('(a = b) <<= c') is '(a = b) <<= c'
+FAIL compileAndSerialize('(a = b) <<= c') should be (a = b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b <<= c)') is 'a = (b <<= c)'
PASS compileAndSerialize('a <<= b + c') is 'a <<= b + c'
PASS compileAndSerialize('(a <<= b) + c') is '(a <<= b) + c'
PASS compileAndSerialize('a <<= (b + c)') is 'a <<= (b + c)'
-FAIL compileAndSerialize('a + b <<= c') should throw an exception. Was a + b <<= c.
-PASS compileAndSerialize('(a + b) <<= c') is '(a + b) <<= c'
+PASS compileAndSerialize('a + b <<= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) <<= c') should be (a + b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b <<= c)') is 'a + (b <<= c)'
PASS compileAndSerialize('a >>= b >>= c') is 'a >>= b >>= c'
-PASS compileAndSerialize('(a >>= b) >>= c') is '(a >>= b) >>= c'
+FAIL compileAndSerialize('(a >>= b) >>= c') should be (a >>= b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a >>= (b >>= c)') is 'a >>= (b >>= c)'
PASS compileAndSerialize('a = b >>= c') is 'a = b >>= c'
-PASS compileAndSerialize('(a = b) >>= c') is '(a = b) >>= c'
+FAIL compileAndSerialize('(a = b) >>= c') should be (a = b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b >>= c)') is 'a = (b >>= c)'
PASS compileAndSerialize('a >>= b + c') is 'a >>= b + c'
PASS compileAndSerialize('(a >>= b) + c') is '(a >>= b) + c'
PASS compileAndSerialize('a >>= (b + c)') is 'a >>= (b + c)'
-FAIL compileAndSerialize('a + b >>= c') should throw an exception. Was a + b >>= c.
-PASS compileAndSerialize('(a + b) >>= c') is '(a + b) >>= c'
+PASS compileAndSerialize('a + b >>= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) >>= c') should be (a + b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b >>= c)') is 'a + (b >>= c)'
PASS compileAndSerialize('a >>>= b >>>= c') is 'a >>>= b >>>= c'
-PASS compileAndSerialize('(a >>>= b) >>>= c') is '(a >>>= b) >>>= c'
+FAIL compileAndSerialize('(a >>>= b) >>>= c') should be (a >>>= b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a >>>= (b >>>= c)') is 'a >>>= (b >>>= c)'
PASS compileAndSerialize('a = b >>>= c') is 'a = b >>>= c'
-PASS compileAndSerialize('(a = b) >>>= c') is '(a = b) >>>= c'
+FAIL compileAndSerialize('(a = b) >>>= c') should be (a = b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b >>>= c)') is 'a = (b >>>= c)'
PASS compileAndSerialize('a >>>= b + c') is 'a >>>= b + c'
PASS compileAndSerialize('(a >>>= b) + c') is '(a >>>= b) + c'
PASS compileAndSerialize('a >>>= (b + c)') is 'a >>>= (b + c)'
-FAIL compileAndSerialize('a + b >>>= c') should throw an exception. Was a + b >>>= c.
-PASS compileAndSerialize('(a + b) >>>= c') is '(a + b) >>>= c'
+PASS compileAndSerialize('a + b >>>= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) >>>= c') should be (a + b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b >>>= c)') is 'a + (b >>>= c)'
PASS compileAndSerialize('a &= b &= c') is 'a &= b &= c'
-PASS compileAndSerialize('(a &= b) &= c') is '(a &= b) &= c'
+FAIL compileAndSerialize('(a &= b) &= c') should be (a &= b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a &= (b &= c)') is 'a &= (b &= c)'
PASS compileAndSerialize('a = b &= c') is 'a = b &= c'
-PASS compileAndSerialize('(a = b) &= c') is '(a = b) &= c'
+FAIL compileAndSerialize('(a = b) &= c') should be (a = b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b &= c)') is 'a = (b &= c)'
PASS compileAndSerialize('a &= b + c') is 'a &= b + c'
PASS compileAndSerialize('(a &= b) + c') is '(a &= b) + c'
PASS compileAndSerialize('a &= (b + c)') is 'a &= (b + c)'
-FAIL compileAndSerialize('a + b &= c') should throw an exception. Was a + b &= c.
-PASS compileAndSerialize('(a + b) &= c') is '(a + b) &= c'
+PASS compileAndSerialize('a + b &= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) &= c') should be (a + b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b &= c)') is 'a + (b &= c)'
PASS compileAndSerialize('a ^= b ^= c') is 'a ^= b ^= c'
-PASS compileAndSerialize('(a ^= b) ^= c') is '(a ^= b) ^= c'
+FAIL compileAndSerialize('(a ^= b) ^= c') should be (a ^= b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a ^= (b ^= c)') is 'a ^= (b ^= c)'
PASS compileAndSerialize('a = b ^= c') is 'a = b ^= c'
-PASS compileAndSerialize('(a = b) ^= c') is '(a = b) ^= c'
+FAIL compileAndSerialize('(a = b) ^= c') should be (a = b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b ^= c)') is 'a = (b ^= c)'
PASS compileAndSerialize('a ^= b + c') is 'a ^= b + c'
PASS compileAndSerialize('(a ^= b) + c') is '(a ^= b) + c'
PASS compileAndSerialize('a ^= (b + c)') is 'a ^= (b + c)'
-FAIL compileAndSerialize('a + b ^= c') should throw an exception. Was a + b ^= c.
-PASS compileAndSerialize('(a + b) ^= c') is '(a + b) ^= c'
+PASS compileAndSerialize('a + b ^= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) ^= c') should be (a + b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b ^= c)') is 'a + (b ^= c)'
PASS compileAndSerialize('a |= b |= c') is 'a |= b |= c'
-PASS compileAndSerialize('(a |= b) |= c') is '(a |= b) |= c'
+FAIL compileAndSerialize('(a |= b) |= c') should be (a |= b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a |= (b |= c)') is 'a |= (b |= c)'
PASS compileAndSerialize('a = b |= c') is 'a = b |= c'
-PASS compileAndSerialize('(a = b) |= c') is '(a = b) |= c'
+FAIL compileAndSerialize('(a = b) |= c') should be (a = b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b |= c)') is 'a = (b |= c)'
PASS compileAndSerialize('a |= b + c') is 'a |= b + c'
PASS compileAndSerialize('(a |= b) + c') is '(a |= b) + c'
PASS compileAndSerialize('a |= (b + c)') is 'a |= (b + c)'
-FAIL compileAndSerialize('a + b |= c') should throw an exception. Was a + b |= c.
-PASS compileAndSerialize('(a + b) |= c') is '(a + b) |= c'
+PASS compileAndSerialize('a + b |= c') threw exception ReferenceError: Invalid left-hand side in assignment.
+FAIL compileAndSerialize('(a + b) |= c') should be (a + b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerialize('a + (b |= c)') is 'a + (b |= c)'
PASS compileAndSerialize('delete a + b') is 'delete a + b'
PASS compileAndSerialize('(delete a) + b') is '(delete a) + b'
@@ -391,12 +391,12 @@ PASS compileAndSerialize('!typeof a') is '!typeof a'
PASS compileAndSerialize('!(typeof a)') is '!(typeof a)'
PASS compileAndSerialize('++a + b') is '++a + b'
PASS compileAndSerialize('(++a) + b') is '(++a) + b'
-PASS compileAndSerialize('++(a + b)') is '++(a + b)'
+FAIL compileAndSerialize('++(a + b)') should be ++(a + b). Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
PASS compileAndSerialize('!++a') is '!++a'
PASS compileAndSerialize('!(++a)') is '!(++a)'
PASS compileAndSerialize('--a + b') is '--a + b'
PASS compileAndSerialize('(--a) + b') is '(--a) + b'
-PASS compileAndSerialize('--(a + b)') is '--(a + b)'
+FAIL compileAndSerialize('--(a + b)') should be --(a + b). Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
PASS compileAndSerialize('!--a') is '!--a'
PASS compileAndSerialize('!(--a)') is '!(--a)'
PASS compileAndSerialize('+ a + b') is '+ a + b'
@@ -421,10 +421,10 @@ PASS compileAndSerialize('!!a') is '!!a'
PASS compileAndSerialize('!(!a)') is '!(!a)'
PASS compileAndSerialize('!a++') is '!a++'
PASS compileAndSerialize('!(a++)') is '!(a++)'
-PASS compileAndSerialize('(!a)++') is '(!a)++'
+FAIL compileAndSerialize('(!a)++') should be (!a)++. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
PASS compileAndSerialize('!a--') is '!a--'
PASS compileAndSerialize('!(a--)') is '!(a--)'
-PASS compileAndSerialize('(!a)--') is '(!a)--'
+FAIL compileAndSerialize('(!a)--') should be (!a)--. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
PASS compileAndSerialize('(-1)[a]') is '(-1)[a]'
PASS compileAndSerialize('(-1)[a] = b') is '(-1)[a] = b'
PASS compileAndSerialize('(-1)[a] += b') is '(-1)[a] += b'
@@ -464,42 +464,42 @@ PASS compileAndSerialize('(1).a += b') is '(1).a += b'
PASS compileAndSerialize('(1).a++') is '(1).a++'
PASS compileAndSerialize('++(1).a') is '++(1).a'
PASS compileAndSerialize('(1).a()') is '(1).a()'
-PASS compileAndSerialize('(-1) = a') is '(-1) = a'
-PASS compileAndSerialize('(- 0) = a') is '(- 0) = a'
-PASS compileAndSerialize('1 = a') is '1 = a'
-PASS compileAndSerialize('(-1) *= a') is '(-1) *= a'
-PASS compileAndSerialize('(- 0) *= a') is '(- 0) *= a'
-PASS compileAndSerialize('1 *= a') is '1 *= a'
-PASS compileAndSerialize('(-1) /= a') is '(-1) /= a'
-PASS compileAndSerialize('(- 0) /= a') is '(- 0) /= a'
-PASS compileAndSerialize('1 /= a') is '1 /= a'
-PASS compileAndSerialize('(-1) %= a') is '(-1) %= a'
-PASS compileAndSerialize('(- 0) %= a') is '(- 0) %= a'
-PASS compileAndSerialize('1 %= a') is '1 %= a'
-PASS compileAndSerialize('(-1) += a') is '(-1) += a'
-PASS compileAndSerialize('(- 0) += a') is '(- 0) += a'
-PASS compileAndSerialize('1 += a') is '1 += a'
-PASS compileAndSerialize('(-1) -= a') is '(-1) -= a'
-PASS compileAndSerialize('(- 0) -= a') is '(- 0) -= a'
-PASS compileAndSerialize('1 -= a') is '1 -= a'
-PASS compileAndSerialize('(-1) <<= a') is '(-1) <<= a'
-PASS compileAndSerialize('(- 0) <<= a') is '(- 0) <<= a'
-PASS compileAndSerialize('1 <<= a') is '1 <<= a'
-PASS compileAndSerialize('(-1) >>= a') is '(-1) >>= a'
-PASS compileAndSerialize('(- 0) >>= a') is '(- 0) >>= a'
-PASS compileAndSerialize('1 >>= a') is '1 >>= a'
-PASS compileAndSerialize('(-1) >>>= a') is '(-1) >>>= a'
-PASS compileAndSerialize('(- 0) >>>= a') is '(- 0) >>>= a'
-PASS compileAndSerialize('1 >>>= a') is '1 >>>= a'
-PASS compileAndSerialize('(-1) &= a') is '(-1) &= a'
-PASS compileAndSerialize('(- 0) &= a') is '(- 0) &= a'
-PASS compileAndSerialize('1 &= a') is '1 &= a'
-PASS compileAndSerialize('(-1) ^= a') is '(-1) ^= a'
-PASS compileAndSerialize('(- 0) ^= a') is '(- 0) ^= a'
-PASS compileAndSerialize('1 ^= a') is '1 ^= a'
-PASS compileAndSerialize('(-1) |= a') is '(-1) |= a'
-PASS compileAndSerialize('(- 0) |= a') is '(- 0) |= a'
-PASS compileAndSerialize('1 |= a') is '1 |= a'
+FAIL compileAndSerialize('(-1) = a') should be (-1) = a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) = a') should be (- 0) = a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 = a') should be 1 = a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) *= a') should be (-1) *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) *= a') should be (- 0) *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 *= a') should be 1 *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) /= a') should be (-1) /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) /= a') should be (- 0) /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 /= a') should be 1 /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) %= a') should be (-1) %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) %= a') should be (- 0) %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 %= a') should be 1 %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) += a') should be (-1) += a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) += a') should be (- 0) += a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 += a') should be 1 += a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) -= a') should be (-1) -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) -= a') should be (- 0) -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 -= a') should be 1 -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) <<= a') should be (-1) <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) <<= a') should be (- 0) <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 <<= a') should be 1 <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) >>= a') should be (-1) >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) >>= a') should be (- 0) >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 >>= a') should be 1 >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) >>>= a') should be (-1) >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) >>>= a') should be (- 0) >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 >>>= a') should be 1 >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) &= a') should be (-1) &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) &= a') should be (- 0) &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 &= a') should be 1 &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) ^= a') should be (-1) ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) ^= a') should be (- 0) ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 ^= a') should be 1 ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(-1) |= a') should be (-1) |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(- 0) |= a') should be (- 0) |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('1 |= a') should be 1 |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
PASS compileAndSerializeLeftmostTest('({ }).x') is '({ }).x'
PASS compileAndSerializeLeftmostTest('x = { }') is 'x = { }'
PASS compileAndSerializeLeftmostTest('(function () { })()') is '(function () { })()'
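The expectation flips above all stem from this V8 rejecting non-reference assignment targets while compiling the function, before toString() can ever serialize it. A minimal JavaScript sketch of that behaviour, assuming a shell (d8 or node) built against this V8 snapshot; newer engines may report a SyntaxError instead:

    try {
      // (a + b) is not a valid reference, so the source fails to compile;
      // nothing is evaluated, the error comes straight from the parser.
      eval("function f() { (a + b) = c; }");
    } catch (e) {
      console.log(e.name);     // "ReferenceError", per the expectations above
      console.log(e.message);  // "Invalid left-hand side in assignment"
    }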
diff --git a/deps/v8/test/webkit/fast/js/kde/exception_propagation-expected.txt b/deps/v8/test/webkit/fast/js/kde/exception_propagation-expected.txt
index 433c66a66..ab717b8f4 100644
--- a/deps/v8/test/webkit/fast/js/kde/exception_propagation-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/exception_propagation-expected.txt
@@ -88,7 +88,6 @@ PASS for_val_part3_throw2 is 4
PASS for_val_part1_throwbody is 1
PASS for_val_part2_throwbody is 1
PASS for_val_part3_throwbody is 4
-PASS forin_count is 4
PASS set_inside_with_throw is 4
PASS set_inside_with_cantconverttoobject is 4
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/kde/exception_propagation.js b/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
index 9d6e753bc..80ece4bad 100644
--- a/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
+++ b/deps/v8/test/webkit/fast/js/kde/exception_propagation.js
@@ -402,26 +402,6 @@ catch (e) {
shouldBe("for_val_part3_throwbody","4");
// ---------------------------------
-var forin_test_obj = new Object();
-forin_test_obj.a = 1;
-forin_test_obj.b = 2;
-forin_test_obj.c = 3;
-var forin_count = 4;
-function forin_lexpr() {
-// if (forincount == 1);
-// throwex();
- return new Object();
-}
-try {
- for (throwex() in forin_test_obj) {
- forin_count++;
- }
-}
-catch (e) {
-}
-shouldBe("forin_count","4");
-
-// ---------------------------------
var set_inside_with_throw = 4;
try {
with (throwex()) {
diff --git a/deps/v8/test/webkit/fast/js/modify-non-references-expected.txt b/deps/v8/test/webkit/fast/js/modify-non-references-expected.txt
index ea20652c0..b8c692aa8 100644
--- a/deps/v8/test/webkit/fast/js/modify-non-references-expected.txt
+++ b/deps/v8/test/webkit/fast/js/modify-non-references-expected.txt
@@ -21,12 +21,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-PASS function f() { g()++; } f.toString() is 'function f() { g()++; }'
-PASS function f() { g()--; } f.toString() is 'function f() { g()--; }'
-PASS function f() { ++g(); } f.toString() is 'function f() { ++g(); }'
-PASS function f() { --g(); } f.toString() is 'function f() { --g(); }'
-PASS function f() { g() = 1; } f.toString() is 'function f() { g() = 1; }'
-PASS function f() { g() += 1; } f.toString() is 'function f() { g() += 1; }'
+FAIL function f() { g()++; } f.toString() should be function f() { g()++; }. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
+FAIL function f() { g()--; } f.toString() should be function f() { g()--; }. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
+FAIL function f() { ++g(); } f.toString() should be function f() { ++g(); }. Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
+FAIL function f() { --g(); } f.toString() should be function f() { --g(); }. Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
+FAIL function f() { g() = 1; } f.toString() should be function f() { g() = 1; }. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL function f() { g() += 1; } f.toString() should be function f() { g() += 1; }. Threw exception ReferenceError: Invalid left-hand side in assignment
FAIL g()++ should throw ReferenceError: Postfix ++ operator applied to value that is not a reference.. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
FAIL g()-- should throw ReferenceError: Postfix -- operator applied to value that is not a reference.. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
FAIL ++g() should throw ReferenceError: Prefix ++ operator applied to value that is not a reference.. Threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index 32819e728..503bec968 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -442,8 +442,8 @@ PASS Valid: "for ((++a) in b) break"
PASS Valid: "function f() { for ((++a) in b) break }"
FAIL Invalid: "for (a, b in c) break" should throw undefined
FAIL Invalid: "function f() { for (a, b in c) break }" should throw undefined
-PASS Invalid: "for (a,b in c ;;) break"
-PASS Invalid: "function f() { for (a,b in c ;;) break }"
+FAIL Invalid: "for (a,b in c ;;) break" should throw undefined
+FAIL Invalid: "function f() { for (a,b in c ;;) break }" should throw undefined
PASS Valid: "for (a,(b in c) ;;) break"
PASS Valid: "function f() { for (a,(b in c) ;;) break }"
PASS Valid: "for ((a, b) in c) break"
diff --git a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
index 6f9716bc3..19ff53fc1 100644
--- a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
+++ b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
@@ -26,9 +26,9 @@ Test to ensure correct handling of --> as a single line comment when at the begi
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS 'should be a syntax error' --> threw exception SyntaxError: Unexpected end of input.
-PASS /**/ 1 --> threw exception SyntaxError: Unexpected end of input.
-PASS 1 /**/ --> threw exception SyntaxError: Unexpected end of input.
+PASS 'should be a syntax error' --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS /**/ 1 --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS 1 /**/ --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 1/*
*/--> threw exception SyntaxError: Unexpected token >.
PASS --> is undefined.
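For context on the three flipped lines: when "-->" does not open a line it is scanned as a postfix "--" followed by ">", and this V8 now complains about the invalid postfix target before it notices the truncated comparison. A rough sketch (hypothetical snippet, sloppy mode only, behaviour as described by the expected output above):

    // At the start of the source, --> still acts as a single-line comment,
    // which is why "PASS --> is undefined." remains unchanged:
    eval("--> everything after the arrow is ignored");

    // After an expression on the same line it is parsed as `-- >`; a string
    // literal is not a valid reference, hence the new ReferenceError:
    try {
      eval("'should be a syntax error' -->");
    } catch (e) {
      console.log(e.name + ": " + e.message);
    }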
diff --git a/deps/v8/test/webkit/string-trim-expected.txt b/deps/v8/test/webkit/string-trim-expected.txt
index 9540f1c8d..6472f89d0 100644
--- a/deps/v8/test/webkit/string-trim-expected.txt
+++ b/deps/v8/test/webkit/string-trim-expected.txt
@@ -89,20 +89,38 @@ PASS whitespace[19].s.trimRight() is ''
PASS whitespace[20].s.trim() is ''
PASS whitespace[20].s.trimLeft() is ''
PASS whitespace[20].s.trimRight() is ''
-PASS whitespace[21].s.trim() is ''
-PASS whitespace[21].s.trimLeft() is ''
-PASS whitespace[21].s.trimRight() is ''
-PASS wsString.trim() is ''
-PASS wsString.trimLeft() is ''
-PASS wsString.trimRight() is ''
-PASS trimString.trim() is testString
-PASS trimString.trimLeft() is leftTrimString
-PASS trimString.trimRight() is rightTrimString
-PASS leftTrimString.trim() is testString
+FAIL whitespace[21].s.trim() should be . Was ​.
+FAIL whitespace[21].s.trimLeft() should be . Was ​.
+FAIL whitespace[21].s.trimRight() should be . Was ​.
+FAIL wsString.trim() should be . Was ​.
+FAIL wsString.trimLeft() should be . Was ​.
+FAIL wsString.trimRight() should be . Was
+
+              

​.
+FAIL trimString.trim() should be foo bar. Was ​foo bar
+
+              

​.
+FAIL trimString.trimLeft() should be foo bar
+
+              

​. Was ​foo bar
+
+              

​.
+FAIL trimString.trimRight() should be
+
+              

​foo bar. Was
+
+              

​foo bar
+
+              

​.
+FAIL leftTrimString.trim() should be foo bar. Was foo bar
+
+              

​.
PASS leftTrimString.trimLeft() is leftTrimString
-PASS leftTrimString.trimRight() is testString
-PASS rightTrimString.trim() is testString
-PASS rightTrimString.trimLeft() is testString
+FAIL leftTrimString.trimRight() should be foo bar. Was foo bar
+
+              

​.
+FAIL rightTrimString.trim() should be foo bar. Was ​foo bar.
+FAIL rightTrimString.trimLeft() should be foo bar. Was ​foo bar.
PASS rightTrimString.trimRight() is rightTrimString
PASS trim.call(0) is '0'
PASS trimLeft.call(0) is '0'
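The character left behind in the new FAIL lines is invisible in the expected output; whitespace[21] in the string-trim test appears to be U+200B (ZERO WIDTH SPACE), which this V8 stops treating as WhiteSpace, so strings that previously trimmed to '' now keep the character. A hypothetical check under that assumption:

    var zwsp = "\u200B";              // assumption: the leftover character above
    console.log(zwsp.trim().length);  // 1 in this V8, 0 under the old expectations
    console.log(("\u200B" + "foo bar").trimLeft());  // now keeps the leading U+200B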
diff --git a/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens-expected.txt b/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens-expected.txt
index e4841b2e9..f9f4a66a3 100644
--- a/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens-expected.txt
+++ b/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens-expected.txt
@@ -26,42 +26,6 @@ This test checks that toString() round-trip on a function that has prefix, postf
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS unevalf(eval(unevalf(prefix_should_preserve_parens))) is unevalf(prefix_should_preserve_parens)
-PASS /.*\(+x\)*, y\)/.test(unevalf(prefix_should_preserve_parens)) is true
-PASS prefix_should_preserve_parens(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS eval(unevalf(prefix_should_preserve_parens))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS unevalf(eval(unevalf(postfix_should_preserve_parens))) is unevalf(postfix_should_preserve_parens)
-PASS /.*\(+x\)*, y\)/.test(unevalf(postfix_should_preserve_parens)) is true
-PASS postfix_should_preserve_parens(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS eval(unevalf(postfix_should_preserve_parens))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS unevalf(eval(unevalf(both_should_preserve_parens))) is unevalf(both_should_preserve_parens)
-PASS /.*\(+x\)*, y\)/.test(unevalf(both_should_preserve_parens)) is true
-PASS both_should_preserve_parens(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS eval(unevalf(both_should_preserve_parens))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS unevalf(eval(unevalf(prefix_should_preserve_parens_multi))) is unevalf(prefix_should_preserve_parens_multi)
-PASS /.*\(+x\)*, y\)/.test(unevalf(prefix_should_preserve_parens_multi)) is true
-PASS prefix_should_preserve_parens_multi(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS eval(unevalf(prefix_should_preserve_parens_multi))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS unevalf(eval(unevalf(postfix_should_preserve_parens_multi))) is unevalf(postfix_should_preserve_parens_multi)
-PASS /.*\(+x\)*, y\)/.test(unevalf(postfix_should_preserve_parens_multi)) is true
-PASS postfix_should_preserve_parens_multi(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS eval(unevalf(postfix_should_preserve_parens_multi))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS unevalf(eval(unevalf(prefix_should_preserve_parens_multi1))) is unevalf(prefix_should_preserve_parens_multi1)
-PASS /.*\(+x\)*, y\)/.test(unevalf(prefix_should_preserve_parens_multi1)) is true
-PASS prefix_should_preserve_parens_multi1(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS eval(unevalf(prefix_should_preserve_parens_multi1))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS unevalf(eval(unevalf(postfix_should_preserve_parens_multi1))) is unevalf(postfix_should_preserve_parens_multi1)
-PASS /.*\(+x\)*, y\)/.test(unevalf(postfix_should_preserve_parens_multi1)) is true
-PASS postfix_should_preserve_parens_multi1(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS eval(unevalf(postfix_should_preserve_parens_multi1))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS unevalf(eval(unevalf(prefix_should_preserve_parens_multi2))) is unevalf(prefix_should_preserve_parens_multi2)
-PASS /.*\(+x\)*, y\)/.test(unevalf(prefix_should_preserve_parens_multi2)) is true
-PASS prefix_should_preserve_parens_multi2(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS eval(unevalf(prefix_should_preserve_parens_multi2))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS unevalf(eval(unevalf(postfix_should_preserve_parens_multi2))) is unevalf(postfix_should_preserve_parens_multi2)
-PASS /.*\(+x\)*, y\)/.test(unevalf(postfix_should_preserve_parens_multi2)) is true
-PASS postfix_should_preserve_parens_multi2(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS eval(unevalf(postfix_should_preserve_parens_multi2))(1, 2, 3); threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS unevalf(eval(unevalf(typeof_should_preserve_parens))) is unevalf(typeof_should_preserve_parens)
PASS /.*\(+x\)*, y\)/.test(unevalf(typeof_should_preserve_parens)) is true
PASS typeof_should_preserve_parens('a', 1); is 'number'
diff --git a/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens.js b/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens.js
index 192f278a1..f114a0252 100644
--- a/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens.js
+++ b/deps/v8/test/webkit/toString-prefix-postfix-preserve-parens.js
@@ -25,6 +25,8 @@ description(
"This test checks that toString() round-trip on a function that has prefix, postfix and typeof operators applied to group expression will not remove the grouping. Also checks that evaluation of such a expression produces run-time exception"
);
+/* These have become obsolete, since they are not syntactically well-formed ES5+.
+
function postfix_should_preserve_parens(x, y, z) {
(x, y)++;
return y;
@@ -78,6 +80,7 @@ function postfix_should_preserve_parens_multi2(x, y, z) {
(((x), y) ,z)++;
return x;
}
+*/
// if these return a variable (such as y) instead of
// the result of typeof, this means that the parenthesis
@@ -138,6 +141,7 @@ function testToStringAndReturn(fn, p1, p2, retval)
}
+/*
testToStringAndRTFailure("prefix_should_preserve_parens");
testToStringAndRTFailure("postfix_should_preserve_parens");
testToStringAndRTFailure("both_should_preserve_parens");
@@ -147,6 +151,7 @@ testToStringAndRTFailure("prefix_should_preserve_parens_multi1");
testToStringAndRTFailure("postfix_should_preserve_parens_multi1");
testToStringAndRTFailure("prefix_should_preserve_parens_multi2");
testToStringAndRTFailure("postfix_should_preserve_parens_multi2");
+*/
testToStringAndReturn("typeof_should_preserve_parens", "'a'", 1, "'number'");
testToStringAndReturn("typeof_should_preserve_parens1", "'a'", 1, "'number'");
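The functions commented out above all apply prefix or postfix ++/-- to a parenthesised comma expression, which is not a valid assignment target, so with this V8 they no longer even compile; that is why the corresponding PASS lines are dropped from the expected output further up. The typeof cases stay enabled because typeof accepts any expression. A small hypothetical example of the pattern those remaining tests exercise (the name typeof_example is illustrative):

    function typeof_example(x, y) {
      return typeof (x, y);   // the grouping must survive toString() round-trips
    }
    console.log(typeof_example("a", 1));     // "number"
    console.log(typeof_example.toString());  // still contains "(x, y)"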
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 8400ba76e..8ae5e3dfc 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -30,6 +30,9 @@
# BUG(237872). TODO(bmeurer): Investigate.
'string-replacement-outofmemory': [FAIL],
+ # TODO(rossberg): Awaiting spec resolution (https://bugs.ecmascript.org/show_bug.cgi?id=2566)
+ 'fast/js/Promise-then': [FAIL],
+
##############################################################################
# Flaky tests.
# BUG(v8:2989).
@@ -48,4 +51,7 @@
['simulator', {
'function-apply-aliased': [SKIP],
}], # 'simulator'
+['arch == arm64 and simulator_run == True', {
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # 'arch == arm64 and simulator_run == True'
]
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 9f65c6773..6e324246d 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -37,7 +37,7 @@ v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
_v8_flag() {
local cur defines targets
cur="${COMP_WORDS[COMP_CWORD]}"
- defines=$(cat src/flag-definitions.h \
+ defines=$(cat $v8_source/src/flag-definitions.h \
| grep "^DEFINE" \
| grep -v "DEFINE_implication" \
| sed -e 's/_/-/g')
@@ -45,7 +45,7 @@ _v8_flag() {
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
| sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
- cat src/d8.cc \
+ cat $v8_source/src/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
COMPREPLY=($(compgen -W "$targets" -- "$cur"))
diff --git a/deps/v8/tools/blink_tests/TestExpectations b/deps/v8/tools/blink_tests/TestExpectations
index 039a918a0..530f85347 100644
--- a/deps/v8/tools/blink_tests/TestExpectations
+++ b/deps/v8/tools/blink_tests/TestExpectations
@@ -26,5 +26,3 @@ crbug.com/178745 [ Win Debug ] plugins/open-and-close-window-with-plugin.html [
crbug.com/249894 [ Linux Debug ] fast/js/regress/inline-arguments-access.html [ Pass Failure Crash Slow ]
[ Linux Debug ] fast/js/regress/inline-arguments-local-escape.html [ Slow ]
-# This test is temporarily disabled in Blink, too.
-crbug.com/340639 fast/js/reserved-words-as-property.html [ Pass Failure ]
diff --git a/deps/v8/tools/cross_build_gcc.sh b/deps/v8/tools/cross_build_gcc.sh
new file mode 100755
index 000000000..e3603cc78
--- /dev/null
+++ b/deps/v8/tools/cross_build_gcc.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if [ "$#" -lt 1 ]; then
+ echo "Usage: tools/cross_build_gcc.sh <GCC prefix> [make arguments ...]"
+ exit 1
+fi
+
+export CXX=$1g++
+export AR=$1ar
+export RANLIB=$1ranlib
+export CC=$1gcc
+export LD=$1g++
+export LINK=$1g++
+
+OK=1
+if [ ! -x "$CXX" ]; then
+ echo "Error: $CXX does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$AR" ]; then
+ echo "Error: $AR does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$RANLIB" ]; then
+ echo "Error: $RANLIB does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$CC" ]; then
+ echo "Error: $CC does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$LD" ]; then
+ echo "Error: $LD does not exist or is not executable."
+ OK=0
+fi
+if [ ! -x "$LINK" ]; then
+ echo "Error: $LINK does not exist or is not executable."
+ OK=0
+fi
+if [ $OK -ne 1 ]; then
+ exit 1
+fi
+
+shift
+make snapshot=off $@
diff --git a/deps/v8/tools/draw_instruction_graph.sh b/deps/v8/tools/draw_instruction_graph.sh
new file mode 100755
index 000000000..549380b8f
--- /dev/null
+++ b/deps/v8/tools/draw_instruction_graph.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script reads in CSV formatted instruction data, and draws a stacked
+# graph in png format.
+
+defaultfile=arm64_inst.csv
+defaultout=arm64_inst.png
+gnuplot=/usr/bin/gnuplot
+
+
+# File containing CSV instruction data from simulator.
+file=${1:-$defaultfile}
+
+# Output graph png file.
+out=${2:-$defaultout}
+
+# Check input file exists.
+if [ ! -e $file ]; then
+ echo "Input file not found: $file."
+ echo "Usage: draw_instruction_graph.sh <input csv> <output png>"
+ exit 1
+fi
+
+# Search for an error message, and if found, exit.
+error=`grep -m1 '# Error:' $file`
+if [ -n "$error" ]; then
+ echo "Error message in input file:"
+ echo " $error"
+ exit 2
+fi
+
+# Sample period - the period over which the numbers for each category of
+# instructions are counted.
+sp=`grep -m1 '# sample_period=' $file | cut -d= -f2`
+
+# Get number of counters in the CSV file.
+nc=`grep -m1 '# counters=' $file | cut -d= -f2`
+
+# Find the annotation arrows. They appear as comments in the CSV file, in the
+# format:
+# # xx @ yyyyy
+# Where xx is a two character annotation identifier, and yyyyy is the
+# position in the executed instruction stream that generated the annotation.
+# Turn these locations into labelled arrows.
+arrows=`sed '/^[^#]/ d' $file | \
+ perl -pe "s/^# .. @ (\d+)/set arrow from \1, graph 0.9 to \1, $sp/"`;
+labels=`sed '/^[^#]/d' $file | \
+ sed -r 's/^# (..) @ (.+)/set label at \2, graph 0.9 "\1" \
+ center offset 0,0.5 font "FreeSans, 8"/'`;
+
+# Check for gnuplot, and warn if not available.
+if [ ! -e $gnuplot ]; then
+ echo "Can't find gnuplot at $gnuplot."
+ echo "Gnuplot version 4.6.3 or later required."
+ exit 3
+fi
+
+# Initialise gnuplot, and give it the data to draw.
+echo | $gnuplot <<EOF
+$arrows
+$labels
+MAXCOL=$nc
+set term png size 1920, 800 #ffffff
+set output '$out'
+set datafile separator ','
+set xtics font 'FreeSans, 10'
+set xlabel 'Instructions' font 'FreeSans, 10'
+set ytics font 'FreeSans, 10'
+set yrange [0:*]
+set key outside font 'FreeSans, 8'
+
+set style line 2 lc rgb '#800000'
+set style line 3 lc rgb '#d00000'
+set style line 4 lc rgb '#ff6000'
+set style line 5 lc rgb '#ffc000'
+set style line 6 lc rgb '#ffff00'
+
+set style line 7 lc rgb '#ff00ff'
+set style line 8 lc rgb '#ffc0ff'
+
+set style line 9 lc rgb '#004040'
+set style line 10 lc rgb '#008080'
+set style line 11 lc rgb '#40c0c0'
+set style line 12 lc rgb '#c0f0f0'
+
+set style line 13 lc rgb '#004000'
+set style line 14 lc rgb '#008000'
+set style line 15 lc rgb '#40c040'
+set style line 16 lc rgb '#c0f0c0'
+
+set style line 17 lc rgb '#2020f0'
+set style line 18 lc rgb '#6060f0'
+set style line 19 lc rgb '#a0a0f0'
+
+set style line 20 lc rgb '#000000'
+set style line 21 lc rgb '#ffffff'
+
+plot for [i=2:MAXCOL] '$file' using 1:(sum [col=i:MAXCOL] column(col)) \
+title columnheader(i) with filledcurve y1=0 ls i
+EOF
+
+
+
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index aa9324756..cd91a913d 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -66,7 +66,7 @@ for i = 1, #arg do
end
end
-local ARCHS = ARGS[1] and { ARGS[1] } or { 'ia32', 'arm', 'x64' }
+local ARCHS = ARGS[1] and { ARGS[1] } or { 'ia32', 'arm', 'x64', 'arm64' }
local io = require "io"
local os = require "os"
@@ -196,7 +196,9 @@ local ARCHITECTURES = {
arm = config { triple = "i586-unknown-linux",
arch_define = "V8_TARGET_ARCH_ARM" },
x64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_X64" }
+ arch_define = "V8_TARGET_ARCH_X64" },
+ arm64 = config { triple = "x86_64-unknown-linux",
+ arch_define = "V8_TARGET_ARCH_ARM64" },
}
-------------------------------------------------------------------------------
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index a5a2ae08a..8178b2f0c 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -27,7 +27,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import BaseHTTPServer
import bisect
+import cgi
import cmd
import codecs
import ctypes
@@ -37,10 +39,15 @@ import mmap
import optparse
import os
import re
-import struct
import sys
import types
+import urllib
+import urlparse
import v8heapconst
+import webbrowser
+
+PORT_NUMBER = 8081
+
USAGE="""usage: %prog [OPTIONS] [DUMP-FILE]
@@ -701,6 +708,20 @@ class MinidumpReader(object):
reader.FormatIntPtr(word))
self.ForEachMemoryRegion(search_inside_region)
+ def FindWordList(self, word):
+ aligned_res = []
+ unaligned_res = []
+ def search_inside_region(reader, start, size, location):
+ for loc in xrange(location, location + size - self.PointerSize()):
+ if reader._ReadWord(loc) == word:
+ slot = start + (loc - location)
+ if slot % self.PointerSize() == 0:
+ aligned_res.append(slot)
+ else:
+ unaligned_res.append(slot)
+ self.ForEachMemoryRegion(search_inside_region)
+ return (aligned_res, unaligned_res)
+
def FindLocation(self, address):
offset = 0
if self.memory_list64 is not None:
@@ -930,8 +951,9 @@ class HeapObject(object):
def SmiField(self, offset):
field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
- assert (field_value & 1) == 0
- return field_value / 2
+ if (field_value & 1) == 0:
+ return field_value / 2
+ return None
class Map(HeapObject):
@@ -1348,7 +1370,9 @@ class JSFunction(HeapObject):
if not self.shared.script.Is(Script): return source
script_source = self.shared.script.source
if not script_source.Is(String): return source
- return script_source.GetChars()[start:end]
+ if start and end:
+ source = script_source.GetChars()[start:end]
+ return source
class SharedFunctionInfo(HeapObject):
@@ -1382,7 +1406,10 @@ class SharedFunctionInfo(HeapObject):
else:
start_position_and_type = \
self.SmiField(self.StartPositionAndTypeOffset())
- self.start_position = start_position_and_type >> 2
+ if start_position_and_type:
+ self.start_position = start_position_and_type >> 2
+ else:
+ self.start_position = None
self.end_position = \
self.SmiField(self.EndPositionOffset())
@@ -1561,6 +1588,79 @@ class KnownMap(HeapObject):
return "<%s>" % self.known_name
+COMMENT_RE = re.compile(r"^C (0x[0-9a-fA-F]+) (.*)$")
+PAGEADDRESS_RE = re.compile(
+ r"^P (mappage|pointerpage|datapage) (0x[0-9a-fA-F]+)$")
+
+
+class InspectionInfo(object):
+ def __init__(self, minidump_name, reader):
+ self.comment_file = minidump_name + ".comments"
+ self.address_comments = {}
+ self.page_address = {}
+ if os.path.exists(self.comment_file):
+ with open(self.comment_file, "r") as f:
+ lines = f.readlines()
+ f.close()
+
+ for l in lines:
+ m = COMMENT_RE.match(l)
+ if m:
+ self.address_comments[int(m.group(1), 0)] = m.group(2)
+ m = PAGEADDRESS_RE.match(l)
+ if m:
+ self.page_address[m.group(1)] = int(m.group(2), 0)
+ self.reader = reader
+ self.styles = {}
+ self.color_addresses()
+ return
+
+ def get_page_address(self, page_kind):
+ return self.page_address.get(page_kind, 0)
+
+ def save_page_address(self, page_kind, address):
+ with open(self.comment_file, "a") as f:
+ f.write("P %s 0x%x\n" % (page_kind, address))
+ f.close()
+
+ def color_addresses(self):
+ # Color all stack addresses.
+ exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+ stack_top = self.reader.ExceptionSP()
+ stack_bottom = exception_thread.stack.start + \
+ exception_thread.stack.memory.data_size
+ frame_pointer = self.reader.ExceptionFP()
+ self.styles[frame_pointer] = "frame"
+ for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ self.styles[slot] = "stackaddress"
+ for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ maybe_address = self.reader.ReadUIntPtr(slot)
+ self.styles[maybe_address] = "stackval"
+ if slot == frame_pointer:
+ self.styles[slot] = "frame"
+ frame_pointer = maybe_address
+ self.styles[self.reader.ExceptionIP()] = "pc"
+
+ def get_style_class(self, address):
+ return self.styles.get(address, None)
+
+ def get_style_class_string(self, address):
+ style = self.get_style_class(address)
+ if style != None:
+ return " class=\"%s\" " % style
+ else:
+ return ""
+
+ def set_comment(self, address, comment):
+ self.address_comments[address] = comment
+ with open(self.comment_file, "a") as f:
+ f.write("C 0x%x %s\n" % (address, comment))
+ f.close()
+
+ def get_comment(self, address):
+ return self.address_comments.get(address, "")
+
+
class InspectionPadawan(object):
"""The padawan can improve annotations by sensing well-known objects."""
def __init__(self, reader, heap):
@@ -1653,6 +1753,1033 @@ class InspectionPadawan(object):
self.reader.FormatIntPtr(self.known_first_data_page),
self.reader.FormatIntPtr(self.known_first_pointer_page))
+WEB_HEADER = """
+<!DOCTYPE html>
+<html>
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="content-type">
+<style media="screen" type="text/css">
+
+.code {
+ font-family: monospace;
+}
+
+.dmptable {
+ border-collapse : collapse;
+ border-spacing : 0px;
+}
+
+.codedump {
+ border-collapse : collapse;
+ border-spacing : 0px;
+}
+
+.addrcomments {
+ border : 0px;
+}
+
+.register {
+ padding-right : 1em;
+}
+
+.header {
+ clear : both;
+}
+
+.header .navigation {
+ float : left;
+}
+
+.header .dumpname {
+ float : right;
+}
+
+tr.highlight-line {
+ background-color : yellow;
+}
+
+.highlight {
+ background-color : magenta;
+}
+
+tr.inexact-highlight-line {
+ background-color : pink;
+}
+
+input {
+ background-color: inherit;
+ border: 1px solid LightGray;
+}
+
+.dumpcomments {
+ border : 1px solid LightGray;
+ width : 32em;
+}
+
+.regions td {
+ padding:0 15px 0 15px;
+}
+
+.stackframe td {
+ background-color : cyan;
+}
+
+.stackaddress {
+ background-color : LightGray;
+}
+
+.stackval {
+ background-color : LightCyan;
+}
+
+.frame {
+ background-color : cyan;
+}
+
+.commentinput {
+ width : 20em;
+}
+
+a.nodump:visited {
+ color : black;
+ text-decoration : none;
+}
+
+a.nodump:link {
+ color : black;
+ text-decoration : none;
+}
+
+a:visited {
+ color : blueviolet;
+}
+
+a:link {
+ color : blue;
+}
+
+.disasmcomment {
+ color : DarkGreen;
+}
+
+</style>
+
+<script type="application/javascript">
+
+var address_str = "address-";
+var address_len = address_str.length;
+
+function comment() {
+ var s = event.srcElement.id;
+ var index = s.indexOf(address_str);
+ if (index >= 0) {
+ send_comment(s.substring(index + address_len), event.srcElement.value);
+ }
+}
+
+function send_comment(address, comment) {
+ xmlhttp = new XMLHttpRequest();
+ address = encodeURIComponent(address)
+ comment = encodeURIComponent(comment)
+ xmlhttp.open("GET",
+ "setcomment?%(query_dump)s&address=" + address +
+ "&comment=" + comment, true);
+ xmlhttp.send();
+}
+
+var dump_str = "dump-";
+var dump_len = dump_str.length;
+
+function dump_comment() {
+ var s = event.srcElement.id;
+ var index = s.indexOf(dump_str);
+ if (index >= 0) {
+ send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
+ }
+}
+
+function send_dump_desc(name, desc) {
+ xmlhttp = new XMLHttpRequest();
+ name = encodeURIComponent(name)
+ desc = encodeURIComponent(desc)
+ xmlhttp.open("GET",
+ "setdumpdesc?dump=" + name +
+ "&description=" + desc, true);
+ xmlhttp.send();
+}
+
+function onpage(kind, address) {
+ xmlhttp = new XMLHttpRequest();
+ kind = encodeURIComponent(kind)
+ address = encodeURIComponent(address)
+ xmlhttp.onreadystatechange = function() {
+ if (xmlhttp.readyState==4 && xmlhttp.status==200) {
+ location.reload(true)
+ }
+ };
+ xmlhttp.open("GET",
+ "setpageaddress?%(query_dump)s&kind=" + kind +
+ "&address=" + address);
+ xmlhttp.send();
+}
+
+</script>
+
+<title>Dump %(dump_name)s</title>
+</head>
+
+<body>
+ <div class="header">
+ <form class="navigation" action="search.html">
+ <a href="summary.html?%(query_dump)s">Context info</a>&nbsp;&nbsp;&nbsp;
+ <a href="info.html?%(query_dump)s">Dump info</a>&nbsp;&nbsp;&nbsp;
+ <a href="modules.html?%(query_dump)s">Modules</a>&nbsp;&nbsp;&nbsp;
+ &nbsp;
+ <input type="search" name="val">
+ <input type="submit" name="search" value="Search">
+ <input type="hidden" name="dump" value="%(dump_name)s">
+ </form>
+ <form class="navigation" action="disasm.html#highlight">
+ &nbsp;
+ &nbsp;
+ &nbsp;
+ <input type="search" name="val">
+ <input type="submit" name="disasm" value="Disasm">
+ &nbsp;
+ &nbsp;
+ &nbsp;
+ <a href="dumps.html">Dumps...</a>
+ </form>
+ </div>
+ <br>
+ <hr>
+"""
+
+
+WEB_FOOTER = """
+</body>
+</html>
+"""
+
+
+class WebParameterError(Exception):
+ def __init__(self, message):
+ Exception.__init__(self, message)
+
+
+class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ def formatter(self, query_components):
+ name = query_components.get("dump", [None])[0]
+ return self.server.get_dump_formatter(name)
+
+ def send_success_html_headers(self):
+ self.send_response(200)
+ self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
+ self.send_header("Pragma", "no-cache")
+ self.send_header("Expires", "0")
+ self.send_header('Content-type','text/html')
+ self.end_headers()
+ return
+
+ def do_GET(self):
+ try:
+ parsedurl = urlparse.urlparse(self.path)
+ query_components = urlparse.parse_qs(parsedurl.query)
+ if parsedurl.path == "/dumps.html":
+ self.send_success_html_headers()
+ self.server.output_dumps(self.wfile)
+ elif parsedurl.path == "/summary.html":
+ self.send_success_html_headers()
+ self.formatter(query_components).output_summary(self.wfile)
+ elif parsedurl.path == "/info.html":
+ self.send_success_html_headers()
+ self.formatter(query_components).output_info(self.wfile)
+ elif parsedurl.path == "/modules.html":
+ self.send_success_html_headers()
+ self.formatter(query_components).output_modules(self.wfile)
+ elif parsedurl.path == "/search.html":
+ address = query_components.get("val", [])
+ if len(address) != 1:
+ self.send_error(404, "Invalid params")
+ return
+ self.send_success_html_headers()
+ self.formatter(query_components).output_search_res(
+ self.wfile, address[0])
+ elif parsedurl.path == "/disasm.html":
+ address = query_components.get("val", [])
+ exact = query_components.get("exact", ["on"])
+ if len(address) != 1:
+ self.send_error(404, "Invalid params")
+ return
+ self.send_success_html_headers()
+ self.formatter(query_components).output_disasm(
+ self.wfile, address[0], exact[0])
+ elif parsedurl.path == "/data.html":
+ address = query_components.get("val", [])
+ datakind = query_components.get("type", ["address"])
+ if len(address) == 1 and len(datakind) == 1:
+ self.send_success_html_headers()
+ self.formatter(query_components).output_data(
+ self.wfile, address[0], datakind[0])
+ else:
+ self.send_error(404,'Invalid params')
+ elif parsedurl.path == "/setdumpdesc":
+ name = query_components.get("dump", [""])
+ description = query_components.get("description", [""])
+ if len(name) == 1 and len(description) == 1:
+ name = name[0]
+ description = description[0]
+ if self.server.set_dump_desc(name, description):
+ self.send_success_html_headers()
+ self.wfile.write("OK")
+ return
+ self.send_error(404,'Invalid params')
+ elif parsedurl.path == "/setcomment":
+ address = query_components.get("address", [])
+ comment = query_components.get("comment", [""])
+ if len(address) == 1 and len(comment) == 1:
+ address = address[0]
+ comment = comment[0]
+ self.formatter(query_components).set_comment(address, comment)
+ self.send_success_html_headers()
+ self.wfile.write("OK")
+ else:
+ self.send_error(404,'Invalid params')
+ elif parsedurl.path == "/setpageaddress":
+ kind = query_components.get("kind", [])
+ address = query_components.get("address", [""])
+ if len(kind) == 1 and len(address) == 1:
+ kind = kind[0]
+ address = address[0]
+ self.formatter(query_components).set_page_address(kind, address)
+ self.send_success_html_headers()
+ self.wfile.write("OK")
+ else:
+ self.send_error(404,'Invalid params')
+ else:
+ self.send_error(404,'File Not Found: %s' % self.path)
+
+ except IOError:
+ self.send_error(404,'File Not Found: %s' % self.path)
+
+ except WebParameterError as e:
+ self.send_error(404, 'Web parameter error: %s' % e.message)
+
+
+HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span>\n"
+
+
+class InspectionWebFormatter(object):
+ CONTEXT_FULL = 0
+ CONTEXT_SHORT = 1
+
+ def __init__(self, switches, minidump_name, http_server):
+ self.dumpfilename = os.path.split(minidump_name)[1]
+ self.encfilename = urllib.urlencode({ 'dump' : self.dumpfilename })
+ self.reader = MinidumpReader(switches, minidump_name)
+ self.server = http_server
+
+ # Set up the heap
+ exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+ stack_top = self.reader.ExceptionSP()
+ stack_bottom = exception_thread.stack.start + \
+ exception_thread.stack.memory.data_size
+ stack_map = {self.reader.ExceptionIP(): -1}
+ for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+ maybe_address = self.reader.ReadUIntPtr(slot)
+ if not maybe_address in stack_map:
+ stack_map[maybe_address] = slot
+ self.heap = V8Heap(self.reader, stack_map)
+
+ self.padawan = InspectionPadawan(self.reader, self.heap)
+ self.comments = InspectionInfo(minidump_name, self.reader)
+ self.padawan.known_first_data_page = (
+ self.comments.get_page_address("datapage"))
+ self.padawan.known_first_map_page = (
+ self.comments.get_page_address("mappage"))
+ self.padawan.known_first_pointer_page = (
+ self.comments.get_page_address("pointerpage"))
+
+ def set_comment(self, straddress, comment):
+ try:
+ address = int(straddress, 0)
+ self.comments.set_comment(address, comment)
+ except ValueError:
+ print "Invalid address"
+
+ def set_page_address(self, kind, straddress):
+ try:
+ address = int(straddress, 0)
+ if kind == "datapage":
+ self.padawan.known_first_data_page = address
+ elif kind == "mappage":
+ self.padawan.known_first_map_page = address
+ elif kind == "pointerpage":
+ self.padawan.known_first_pointer_page = address
+ self.comments.save_page_address(kind, address)
+ except ValueError:
+ print "Invalid address"
+
+ def td_from_address(self, f, address):
+ f.write("<td %s>" % self.comments.get_style_class_string(address))
+
+ def format_address(self, maybeaddress, straddress = None):
+ if maybeaddress is None:
+ return "not in dump"
+ else:
+ if straddress is None:
+ straddress = "0x" + self.reader.FormatIntPtr(maybeaddress)
+ style_class = ""
+ if not self.reader.IsValidAddress(maybeaddress):
+ style_class = " class=\"nodump\""
+ return ("<a %s href=\"search.html?%s&amp;val=%s\">%s</a>" %
+ (style_class, self.encfilename, straddress, straddress))
+
+ def output_header(self, f):
+ f.write(WEB_HEADER %
+ { "query_dump" : self.encfilename,
+ "dump_name" : cgi.escape(self.dumpfilename) })
+
+ def output_footer(self, f):
+ f.write(WEB_FOOTER)
+
+ MAX_CONTEXT_STACK = 4096
+
+ def output_summary(self, f):
+ self.output_header(f)
+ f.write('<div class="code">')
+ self.output_context(f, InspectionWebFormatter.CONTEXT_SHORT)
+ self.output_disasm_pc(f)
+
+ # Output stack
+ exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+ stack_bottom = exception_thread.stack.start + \
+ min(exception_thread.stack.memory.data_size, self.MAX_CONTEXT_STACK)
+ stack_top = self.reader.ExceptionSP()
+ self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack")
+
+ f.write('</div>')
+ self.output_footer(f)
+ return
+
+ def output_info(self, f):
+ self.output_header(f)
+ f.write("<h3>Dump info</h3>\n")
+ f.write("Description: ")
+ self.server.output_dump_desc_field(f, self.dumpfilename)
+ f.write("<br>\n")
+ f.write("Filename: ")
+ f.write("<span class=\"code\">%s</span><br>\n" % (self.dumpfilename))
+ dt = datetime.datetime.fromtimestamp(self.reader.header.time_date_stampt)
+ f.write("Timestamp: %s<br>\n" % dt.strftime('%Y-%m-%d %H:%M:%S'))
+ self.output_context(f, InspectionWebFormatter.CONTEXT_FULL)
+ self.output_address_ranges(f)
+ self.output_footer(f)
+ return
+
+ def output_address_ranges(self, f):
+ regions = {}
+ def print_region(_reader, start, size, _location):
+ regions[start] = size
+ self.reader.ForEachMemoryRegion(print_region)
+ f.write("<h3>Available memory regions</h3>\n")
+ f.write('<div class="code">')
+ f.write("<table class=\"regions\">\n")
+ f.write("<thead><tr>")
+ f.write("<th>Start address</th>")
+ f.write("<th>End address</th>")
+ f.write("<th>Number of bytes</th>")
+ f.write("</tr></thead>\n")
+ for start in sorted(regions):
+ size = regions[start]
+ f.write("<tr>")
+ f.write("<td>%s</td>" % self.format_address(start))
+ f.write("<td>&nbsp;%s</td>" % self.format_address(start + size))
+ f.write("<td>&nbsp;%d</td>" % size)
+ f.write("</tr>\n")
+ f.write("</table>\n")
+ f.write('</div>')
+ return
+
+ def output_module_details(self, f, module):
+ f.write("<b>%s</b>" % GetModuleName(self.reader, module))
+ file_version = GetVersionString(module.version_info.dwFileVersionMS,
+ module.version_info.dwFileVersionLS)
+ product_version = GetVersionString(module.version_info.dwProductVersionMS,
+ module.version_info.dwProductVersionLS)
+ f.write("<br>&nbsp;&nbsp;\n")
+ f.write("base: %s" % self.reader.FormatIntPtr(module.base_of_image))
+ f.write("<br>&nbsp;&nbsp;\n")
+ f.write(" end: %s" % self.reader.FormatIntPtr(module.base_of_image +
+ module.size_of_image))
+ f.write("<br>&nbsp;&nbsp;\n")
+ f.write(" file version: %s" % file_version)
+ f.write("<br>&nbsp;&nbsp;\n")
+ f.write(" product version: %s" % product_version)
+ f.write("<br>&nbsp;&nbsp;\n")
+ time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
+ f.write(" timestamp: %s" % time_date_stamp)
+ f.write("<br>\n");
+
+ def output_modules(self, f):
+ self.output_header(f)
+ f.write('<div class="code">')
+ for module in self.reader.module_list.modules:
+ self.output_module_details(f, module)
+ f.write("</div>")
+ self.output_footer(f)
+ return
+
+ def output_context(self, f, details):
+ exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+ f.write("<h3>Exception context</h3>")
+ f.write('<div class="code">\n')
+ f.write("Thread id: %d" % exception_thread.id)
+ f.write("&nbsp;&nbsp; Exception code: %08X\n" %
+ self.reader.exception.exception.code)
+ if details == InspectionWebFormatter.CONTEXT_FULL:
+ if self.reader.exception.exception.parameter_count > 0:
+ f.write("&nbsp;&nbsp; Exception parameters: \n")
+ for i in xrange(0, self.reader.exception.exception.parameter_count):
+ f.write("%08x" % self.reader.exception.exception.information[i])
+ f.write("<br><br>\n")
+
+ for r in CONTEXT_FOR_ARCH[self.reader.arch]:
+ f.write(HTML_REG_FORMAT %
+ (r, self.format_address(self.reader.Register(r))))
+ # TODO(vitalyr): decode eflags.
+ if self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
+ f.write("<b>cpsr</b>: %s" % bin(self.reader.exception_context.cpsr)[2:])
+ else:
+ f.write("<b>eflags</b>: %s" %
+ bin(self.reader.exception_context.eflags)[2:])
+ f.write('</div>\n')
+ return
+
+ def align_down(self, a, size):
+ alignment_correction = a % size
+ return a - alignment_correction
+
+ def align_up(self, a, size):
+ alignment_correction = (size - 1) - ((a + size - 1) % size)
+ return a + alignment_correction
+
+ def format_object(self, address):
+ heap_object = self.padawan.SenseObject(address)
+ return cgi.escape(str(heap_object or ""))
+
+ def output_data(self, f, straddress, datakind):
+ try:
+ self.output_header(f)
+ address = int(straddress, 0)
+ if not self.reader.IsValidAddress(address):
+ f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
+ return
+ region = self.reader.FindRegion(address)
+ if datakind == "address":
+ self.output_words(f, region[0], region[0] + region[1], address, "Dump")
+ elif datakind == "ascii":
+ self.output_ascii(f, region[0], region[0] + region[1], address)
+ self.output_footer(f)
+
+ except ValueError:
+ f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+ return
+
+ def output_words(self, f, start_address, end_address,
+ highlight_address, desc):
+ region = self.reader.FindRegion(highlight_address)
+ if region is None:
+ f.write("<h3>Address 0x%x not found in the dump.</h3>\n" %
+ (highlight_address))
+ return
+ size = self.heap.PointerSize()
+ start_address = self.align_down(start_address, size)
+ low = self.align_down(region[0], size)
+ high = self.align_up(region[0] + region[1], size)
+ if start_address < low:
+ start_address = low
+ end_address = self.align_up(end_address, size)
+ if end_address > high:
+ end_address = high
+
+ expand = ""
+ if start_address != low or end_address != high:
+ expand = ("(<a href=\"data.html?%s&amp;val=0x%x#highlight\">"
+ " more..."
+ " </a>)" %
+ (self.encfilename, highlight_address))
+
+ f.write("<h3>%s 0x%x - 0x%x, "
+ "highlighting <a href=\"#highlight\">0x%x</a> %s</h3>\n" %
+ (desc, start_address, end_address, highlight_address, expand))
+ f.write('<div class="code">')
+ f.write("<table class=\"codedump\">\n")
+
+ for slot in xrange(start_address, end_address, size):
+ heap_object = ""
+ maybe_address = None
+ end_region = region[0] + region[1]
+ if slot < region[0] or slot + size > end_region:
+ straddress = "0x"
+ for i in xrange(end_region, slot + size):
+ straddress += "??"
+ for i in reversed(
+ xrange(max(slot, region[0]), min(slot + size, end_region))):
+ straddress += "%02x" % self.reader.ReadU8(i)
+ for i in xrange(slot, region[0]):
+ straddress += "??"
+ else:
+ maybe_address = self.reader.ReadUIntPtr(slot)
+ straddress = self.format_address(maybe_address)
+ if maybe_address:
+ heap_object = self.format_object(maybe_address)
+
+ address_fmt = "%s&nbsp;</td>\n"
+ if slot == highlight_address:
+ f.write("<tr class=\"highlight-line\">\n")
+ address_fmt = "<a id=\"highlight\"></a>%s&nbsp;</td>\n"
+ elif slot < highlight_address and highlight_address < slot + size:
+ f.write("<tr class=\"inexact-highlight-line\">\n")
+ address_fmt = "<a id=\"highlight\"></a>%s&nbsp;</td>\n"
+ else:
+ f.write("<tr>\n")
+
+ f.write(" <td>")
+ self.output_comment_box(f, "da-", slot)
+ f.write("</td>\n")
+ f.write(" ")
+ self.td_from_address(f, slot)
+ f.write(address_fmt % self.format_address(slot))
+ f.write(" ")
+ self.td_from_address(f, maybe_address)
+ f.write(":&nbsp; %s &nbsp;</td>\n" % straddress)
+ f.write(" <td>")
+ if maybe_address != None:
+ self.output_comment_box(
+ f, "sv-" + self.reader.FormatIntPtr(slot), maybe_address)
+ f.write(" </td>\n")
+ f.write(" <td>%s</td>\n" % (heap_object or ''))
+ f.write("</tr>\n")
+ f.write("</table>\n")
+ f.write("</div>")
+ return
+
+ def output_ascii(self, f, start_address, end_address, highlight_address):
+ region = self.reader.FindRegion(highlight_address)
+ if region is None:
+      f.write("<h3>Address 0x%x not found in the dump.</h3>" %
+ highlight_address)
+ return
+ if start_address < region[0]:
+ start_address = region[0]
+ if end_address > region[0] + region[1]:
+ end_address = region[0] + region[1]
+
+ expand = ""
+ if start_address != region[0] or end_address != region[0] + region[1]:
+ link = ("data.html?%s&amp;val=0x%x&amp;type=ascii#highlight" %
+ (self.encfilename, highlight_address))
+ expand = "(<a href=\"%s\">more...</a>)" % link
+
+ f.write("<h3>ASCII dump 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
+ (start_address, end_address, highlight_address, expand))
+
+ line_width = 64
+
+ f.write('<div class="code">')
+
+ start = self.align_down(start_address, line_width)
+
+ for address in xrange(start, end_address):
+ if address % 64 == 0:
+ if address != start:
+ f.write("<br>")
+ f.write("0x%08x:&nbsp;" % address)
+ if address < start_address:
+ f.write("&nbsp;")
+ else:
+ if address == highlight_address:
+ f.write("<span class=\"highlight\">")
+ code = self.reader.ReadU8(address)
+ if code < 127 and code >= 32:
+ f.write("&#")
+ f.write(str(code))
+ f.write(";")
+ else:
+ f.write("&middot;")
+ if address == highlight_address:
+ f.write("</span>")
+ f.write("</div>")
+ return
+
+ def output_disasm(self, f, straddress, strexact):
+ try:
+ self.output_header(f)
+ address = int(straddress, 0)
+ if not self.reader.IsValidAddress(address):
+ f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
+ return
+ region = self.reader.FindRegion(address)
+ self.output_disasm_range(
+ f, region[0], region[0] + region[1], address, strexact == "on")
+ self.output_footer(f)
+ except ValueError:
+ f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+ return
+
+ def output_disasm_range(
+ self, f, start_address, end_address, highlight_address, exact):
+ region = self.reader.FindRegion(highlight_address)
+ if start_address < region[0]:
+ start_address = region[0]
+ if end_address > region[0] + region[1]:
+ end_address = region[0] + region[1]
+ count = end_address - start_address
+ lines = self.reader.GetDisasmLines(start_address, count)
+ found = False
+ if exact:
+ for line in lines:
+ if line[0] + start_address == highlight_address:
+ found = True
+ break
+ if not found:
+ start_address = highlight_address
+ count = end_address - start_address
+ lines = self.reader.GetDisasmLines(highlight_address, count)
+ expand = ""
+ if start_address != region[0] or end_address != region[0] + region[1]:
+ exactness = ""
+ if exact and not found and end_address == region[0] + region[1]:
+ exactness = "&amp;exact=off"
+ expand = ("(<a href=\"disasm.html?%s%s"
+ "&amp;val=0x%x#highlight\">more...</a>)" %
+ (self.encfilename, exactness, highlight_address))
+
+ f.write("<h3>Disassembling 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
+ (start_address, end_address, highlight_address, expand))
+ f.write('<div class="code">')
+    f.write("<table class=\"codedump\">\n")
+ for i in xrange(0, len(lines)):
+ line = lines[i]
+ next_address = count
+ if i + 1 < len(lines):
+ next_line = lines[i + 1]
+ next_address = next_line[0]
+ self.format_disasm_line(
+ f, start_address, line, next_address, highlight_address)
+ f.write("</table>\n")
+ f.write("</div>")
+ return
+
+ def annotate_disasm_addresses(self, line):
+ extra = []
+ for m in ADDRESS_RE.finditer(line):
+ maybe_address = int(m.group(0), 16)
+ formatted_address = self.format_address(maybe_address, m.group(0))
+ line = line.replace(m.group(0), formatted_address)
+ object_info = self.padawan.SenseObject(maybe_address)
+ if not object_info:
+ continue
+ extra.append(cgi.escape(str(object_info)))
+ if len(extra) == 0:
+ return line
+ return ("%s <span class=\"disasmcomment\">;; %s</span>" %
+ (line, ", ".join(extra)))
+
+ def format_disasm_line(
+ self, f, start, line, next_address, highlight_address):
+ line_address = start + line[0]
+ address_fmt = " <td>%s</td>\n"
+ if line_address == highlight_address:
+ f.write("<tr class=\"highlight-line\">\n")
+ address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
+ elif (line_address < highlight_address and
+ highlight_address < next_address + start):
+ f.write("<tr class=\"inexact-highlight-line\">\n")
+ address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
+ else:
+ f.write("<tr>\n")
+ num_bytes = next_address - line[0]
+ stack_slot = self.heap.stack_map.get(line_address)
+ marker = ""
+ if stack_slot:
+ marker = "=>"
+ op_offset = 3 * num_bytes - 1
+
+ code = line[1]
+ # Compute the actual call target which the disassembler is too stupid
+ # to figure out (it adds the call offset to the disassembly offset rather
+ # than the absolute instruction address).
+ if self.heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
+ if code.startswith("e8"):
+ words = code.split()
+ if len(words) > 6 and words[5] == "call":
+ offset = int(words[4] + words[3] + words[2] + words[1], 16)
+ target = (line_address + offset + 5) & 0xFFFFFFFF
+ code = code.replace(words[6], "0x%08x" % target)
+ # TODO(jkummerow): port this hack to ARM and x64.
+
+ opcodes = code[:op_offset]
+ code = self.annotate_disasm_addresses(code[op_offset:])
+ f.write(" <td>")
+ self.output_comment_box(f, "codel-", line_address)
+ f.write("</td>\n")
+ f.write(address_fmt % marker)
+ f.write(" ")
+ self.td_from_address(f, line_address)
+ f.write("%s (+0x%x)</td>\n" %
+ (self.format_address(line_address), line[0]))
+ f.write(" <td>:&nbsp;%s&nbsp;</td>\n" % opcodes)
+ f.write(" <td>%s</td>\n" % code)
+ f.write("</tr>\n")
+
+ def output_comment_box(self, f, prefix, address):
+ f.write("<input type=\"text\" class=\"commentinput\" "
+ "id=\"%s-address-0x%s\" onchange=\"comment()\" value=\"%s\">" %
+ (prefix,
+ self.reader.FormatIntPtr(address),
+ cgi.escape(self.comments.get_comment(address)) or ""))
+
+ MAX_FOUND_RESULTS = 100
+
+ def output_find_results(self, f, results):
+ f.write("Addresses")
+ toomany = len(results) > self.MAX_FOUND_RESULTS
+ if toomany:
+      f.write("(found %i results, displaying only the first %i)" %
+ (len(results), self.MAX_FOUND_RESULTS))
+ f.write(": \n")
+ results = sorted(results)
+ results = results[:min(len(results), self.MAX_FOUND_RESULTS)]
+ for address in results:
+ f.write("<span %s>%s</span>\n" %
+ (self.comments.get_style_class_string(address),
+ self.format_address(address)))
+ if toomany:
+ f.write("...\n")
+
+
+ def output_page_info(self, f, page_kind, page_address, my_page_address):
+ if my_page_address == page_address and page_address != 0:
+ f.write("Marked first %s page.\n" % page_kind)
+ else:
+ f.write("<span id=\"%spage\" style=\"display:none\">" % page_kind)
+ f.write("Marked first %s page." % page_kind)
+ f.write("</span>\n")
+ f.write("<button onclick=\"onpage('%spage', '0x%x')\">" %
+ (page_kind, my_page_address))
+ f.write("Mark as first %s page</button>\n" % page_kind)
+ return
+
+ def output_search_res(self, f, straddress):
+ try:
+ self.output_header(f)
+ f.write("<h3>Search results for %s</h3>" % straddress)
+
+ address = int(straddress, 0)
+
+ f.write("Comment: ")
+ self.output_comment_box(f, "search-", address)
+ f.write("<br>\n")
+
+ page_address = address & ~self.heap.PageAlignmentMask()
+
+ f.write("Page info: \n")
+ self.output_page_info(f, "data", self.padawan.known_first_data_page, \
+ page_address)
+ self.output_page_info(f, "map", self.padawan.known_first_map_page, \
+ page_address)
+ self.output_page_info(f, "pointer", \
+ self.padawan.known_first_pointer_page, \
+ page_address)
+
+ if not self.reader.IsValidAddress(address):
+        f.write("<h3>The contents at address %s were not found in the dump.</h3>" % \
+ straddress)
+ else:
+ # Print as words
+ self.output_words(f, address - 8, address + 32, address, "Dump")
+
+ # Print as ASCII
+ f.write("<hr>\n")
+ self.output_ascii(f, address, address + 256, address)
+
+ # Print as code
+ f.write("<hr>\n")
+ self.output_disasm_range(f, address - 16, address + 16, address, True)
+
+ aligned_res, unaligned_res = self.reader.FindWordList(address)
+
+ if len(aligned_res) > 0:
+ f.write("<h3>Occurrences of 0x%x at aligned addresses</h3>\n" %
+ address)
+ self.output_find_results(f, aligned_res)
+
+ if len(unaligned_res) > 0:
+ f.write("<h3>Occurrences of 0x%x at unaligned addresses</h3>\n" % \
+ address)
+ self.output_find_results(f, unaligned_res)
+
+ if len(aligned_res) + len(unaligned_res) == 0:
+        f.write("<h3>No occurrences of 0x%x found in the dump</h3>\n" % address)
+
+ self.output_footer(f)
+
+ except ValueError:
+ f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+ return
+
+ def output_disasm_pc(self, f):
+ address = self.reader.ExceptionIP()
+ if not self.reader.IsValidAddress(address):
+ return
+ self.output_disasm_range(f, address - 16, address + 16, address, True)
+
+
+WEB_DUMPS_HEADER = """
+<!DOCTYPE html>
+<html>
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="content-type">
+<style media="screen" type="text/css">
+
+.dumplist {
+ border-collapse : collapse;
+ border-spacing : 0px;
+ font-family: monospace;
+}
+
+.dumpcomments {
+ border : 1px solid LightGray;
+ width : 32em;
+}
+
+</style>
+
+<script type="application/javascript">
+
+var dump_str = "dump-";
+var dump_len = dump_str.length;
+
+function dump_comment() {
+ var s = event.srcElement.id;
+ var index = s.indexOf(dump_str);
+ if (index >= 0) {
+ send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
+ }
+}
+
+function send_dump_desc(name, desc) {
+ xmlhttp = new XMLHttpRequest();
+ name = encodeURIComponent(name)
+ desc = encodeURIComponent(desc)
+ xmlhttp.open("GET",
+ "setdumpdesc?dump=" + name +
+ "&description=" + desc, true);
+ xmlhttp.send();
+}
+
+</script>
+
+<title>Dump list</title>
+</head>
+
+<body>
+"""
+
+WEB_DUMPS_FOOTER = """
+</body>
+</html>
+"""
+
+DUMP_FILE_RE = re.compile(r"[-_0-9a-zA-Z][-\._0-9a-zA-Z]*\.dmp$")
+
+
+class InspectionWebServer(BaseHTTPServer.HTTPServer):
+ def __init__(self, port_number, switches, minidump_name):
+ BaseHTTPServer.HTTPServer.__init__(
+ self, ('', port_number), InspectionWebHandler)
+ splitpath = os.path.split(minidump_name)
+ self.dumppath = splitpath[0]
+ self.dumpfilename = splitpath[1]
+ self.default_formatter = InspectionWebFormatter(
+ switches, minidump_name, self)
+ self.formatters = { self.dumpfilename : self.default_formatter }
+ self.switches = switches
+
+ def output_dump_desc_field(self, f, name):
+ try:
+ descfile = open(os.path.join(self.dumppath, name + ".desc"), "r")
+ desc = descfile.readline()
+ descfile.close()
+ except IOError:
+ desc = ""
+ f.write("<input type=\"text\" class=\"dumpcomments\" "
+ "id=\"dump-%s\" onchange=\"dump_comment()\" value=\"%s\">\n" %
+ (cgi.escape(name), desc))
+
+ def set_dump_desc(self, name, description):
+ if not DUMP_FILE_RE.match(name):
+ return False
+ fname = os.path.join(self.dumppath, name)
+ if not os.path.isfile(fname):
+ return False
+ fname = fname + ".desc"
+ descfile = open(fname, "w")
+ descfile.write(description)
+ descfile.close()
+ return True
+
+ def get_dump_formatter(self, name):
+ if name is None:
+ return self.default_formatter
+ else:
+ if not DUMP_FILE_RE.match(name):
+ raise WebParameterError("Invalid name '%s'" % name)
+ formatter = self.formatters.get(name, None)
+ if formatter is None:
+ try:
+ formatter = InspectionWebFormatter(
+ self.switches, os.path.join(self.dumppath, name), self)
+ self.formatters[name] = formatter
+ except IOError:
+ raise WebParameterError("Could not open dump '%s'" % name)
+ return formatter
+
+ def output_dumps(self, f):
+ f.write(WEB_DUMPS_HEADER)
+ f.write("<h3>List of available dumps</h3>")
+ f.write("<table class=\"dumplist\">\n")
+ f.write("<thead><tr>")
+ f.write("<th>Name</th>")
+ f.write("<th>File time</th>")
+ f.write("<th>Comment</th>")
+ f.write("</tr></thead>")
+ dumps_by_time = {}
+ for fname in os.listdir(self.dumppath):
+ if DUMP_FILE_RE.match(fname):
+ mtime = os.stat(os.path.join(self.dumppath, fname)).st_mtime
+ fnames = dumps_by_time.get(mtime, [])
+ fnames.append(fname)
+ dumps_by_time[mtime] = fnames
+
+ for mtime in sorted(dumps_by_time, reverse=True):
+ fnames = dumps_by_time[mtime]
+ for fname in fnames:
+ f.write("<tr>\n")
+ f.write("<td><a href=\"summary.html?%s\">%s</a></td>\n" % (
+ (urllib.urlencode({ 'dump' : fname }), fname)))
+ f.write("<td>&nbsp;&nbsp;&nbsp;")
+        f.write(str(datetime.datetime.fromtimestamp(mtime)))
+ f.write("</td>")
+ f.write("<td>&nbsp;&nbsp;&nbsp;")
+ self.output_dump_desc_field(f, fname)
+ f.write("</td>")
+ f.write("</tr>\n")
+ f.write("</table>\n")
+ f.write(WEB_DUMPS_FOOTER)
+ return
class InspectionShell(cmd.Cmd):
def __init__(self, reader, heap):
@@ -1996,6 +3123,8 @@ if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("-s", "--shell", dest="shell", action="store_true",
help="start an interactive inspector shell")
+ parser.add_option("-w", "--web", dest="web", action="store_true",
+ help="start a web server on localhost:%i" % PORT_NUMBER)
parser.add_option("-c", "--command", dest="command", default="",
help="run an interactive inspector shell command and exit")
parser.add_option("-f", "--full", dest="full", action="store_true",
@@ -2014,4 +3143,14 @@ if __name__ == "__main__":
if len(args) != 1:
parser.print_help()
sys.exit(1)
- AnalyzeMinidump(options, args[0])
+ if options.web:
+ try:
+ server = InspectionWebServer(PORT_NUMBER, options, args[0])
+        print 'Started httpserver on port', PORT_NUMBER
+ webbrowser.open('http://localhost:%i/summary.html' % PORT_NUMBER)
+ server.serve_forever()
+ except KeyboardInterrupt:
+ print '^C received, shutting down the web server'
+ server.socket.close()
+ else:
+ AnalyzeMinidump(options, args[0])
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index d78853789..f7bdf52b9 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -27,6 +27,7 @@
{
'variables': {
+ 'icu_use_data_file_flag%': 0,
'v8_code': 1,
'v8_random_seed%': 314159265,
},
@@ -258,6 +259,7 @@
'../../src/assembler.cc',
'../../src/assembler.h',
'../../src/assert-scope.h',
+ '../../src/assert-scope.cc',
'../../src/ast.cc',
'../../src/ast.h',
'../../src/atomicops.h',
@@ -343,6 +345,7 @@
'../../src/factory.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
+ '../../src/feedback-slots.h',
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
'../../src/flag-definitions.h',
@@ -406,8 +409,6 @@
'../../src/hydrogen-mark-deoptimize.h',
'../../src/hydrogen-mark-unreachable.cc',
'../../src/hydrogen-mark-unreachable.h',
- '../../src/hydrogen-minus-zero.cc',
- '../../src/hydrogen-minus-zero.h',
'../../src/hydrogen-osr.cc',
'../../src/hydrogen-osr.h',
'../../src/hydrogen-range-analysis.cc',
@@ -420,6 +421,8 @@
'../../src/hydrogen-representation-changes.h',
'../../src/hydrogen-sce.cc',
'../../src/hydrogen-sce.h',
+ '../../src/hydrogen-store-elimination.cc',
+ '../../src/hydrogen-store-elimination.h',
'../../src/hydrogen-uint32-analysis.cc',
'../../src/hydrogen-uint32-analysis.h',
'../../src/i18n.cc',
@@ -645,6 +648,52 @@
'../../src/arm/stub-cache-arm.cc',
],
}],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ '../../src/arm64/assembler-arm64.cc',
+ '../../src/arm64/assembler-arm64.h',
+ '../../src/arm64/assembler-arm64-inl.h',
+ '../../src/arm64/builtins-arm64.cc',
+ '../../src/arm64/codegen-arm64.cc',
+ '../../src/arm64/codegen-arm64.h',
+ '../../src/arm64/code-stubs-arm64.cc',
+ '../../src/arm64/code-stubs-arm64.h',
+ '../../src/arm64/constants-arm64.h',
+ '../../src/arm64/cpu-arm64.cc',
+ '../../src/arm64/cpu-arm64.h',
+ '../../src/arm64/debug-arm64.cc',
+ '../../src/arm64/decoder-arm64.cc',
+ '../../src/arm64/decoder-arm64.h',
+ '../../src/arm64/decoder-arm64-inl.h',
+ '../../src/arm64/deoptimizer-arm64.cc',
+ '../../src/arm64/disasm-arm64.cc',
+ '../../src/arm64/disasm-arm64.h',
+ '../../src/arm64/frames-arm64.cc',
+ '../../src/arm64/frames-arm64.h',
+ '../../src/arm64/full-codegen-arm64.cc',
+ '../../src/arm64/ic-arm64.cc',
+ '../../src/arm64/instructions-arm64.cc',
+ '../../src/arm64/instructions-arm64.h',
+ '../../src/arm64/instrument-arm64.cc',
+ '../../src/arm64/instrument-arm64.h',
+ '../../src/arm64/lithium-arm64.cc',
+ '../../src/arm64/lithium-arm64.h',
+ '../../src/arm64/lithium-codegen-arm64.cc',
+ '../../src/arm64/lithium-codegen-arm64.h',
+ '../../src/arm64/lithium-gap-resolver-arm64.cc',
+ '../../src/arm64/lithium-gap-resolver-arm64.h',
+ '../../src/arm64/macro-assembler-arm64.cc',
+ '../../src/arm64/macro-assembler-arm64.h',
+ '../../src/arm64/macro-assembler-arm64-inl.h',
+ '../../src/arm64/regexp-macro-assembler-arm64.cc',
+ '../../src/arm64/regexp-macro-assembler-arm64.h',
+ '../../src/arm64/simulator-arm64.cc',
+ '../../src/arm64/simulator-arm64.h',
+ '../../src/arm64/stub-cache-arm64.cc',
+ '../../src/arm64/utils-arm64.cc',
+ '../../src/arm64/utils-arm64.h',
+ ],
+ }],
['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
'sources': [ ### gcmole(arch:ia32) ###
'../../src/ia32/assembler-ia32-inl.h',
@@ -972,6 +1021,17 @@
'../../src/default-platform.h',
],
}],
+ ['icu_use_data_file_flag==1', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+ }, { # else icu_use_data_file_flag !=1
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+ }, {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+ }],
+ ],
+ }],
],
},
{
@@ -1013,6 +1073,9 @@
'../../src/regexp.js',
'../../src/arraybuffer.js',
'../../src/typedarray.js',
+ '../../src/weak_collection.js',
+ '../../src/promise.js',
+ '../../src/object-observe.js',
'../../src/macros.py',
],
'experimental_library_files': [
@@ -1020,8 +1083,6 @@
'../../src/symbol.js',
'../../src/proxy.js',
'../../src/collection.js',
- '../../src/object-observe.js',
- '../../src/promise.js',
'../../src/generator.js',
'../../src/array-iterator.js',
'../../src/harmony-string.js',
diff --git a/deps/v8/tools/lexer-shell.cc b/deps/v8/tools/lexer-shell.cc
index 0610e7f70..e2e4a9c25 100644
--- a/deps/v8/tools/lexer-shell.cc
+++ b/deps/v8/tools/lexer-shell.cc
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <assert.h>
-#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
@@ -35,53 +34,18 @@
#include "v8.h"
#include "api.h"
-#include "ast.h"
-#include "char-predicates-inl.h"
#include "messages.h"
#include "platform.h"
#include "runtime.h"
#include "scanner-character-streams.h"
#include "scopeinfo.h"
+#include "shell-utils.h"
#include "string-stream.h"
#include "scanner.h"
using namespace v8::internal;
-enum Encoding {
- LATIN1,
- UTF8,
- UTF16
-};
-
-
-const byte* ReadFile(const char* name, Isolate* isolate,
- int* size, int repeat) {
- FILE* file = fopen(name, "rb");
- *size = 0;
- if (file == NULL) return NULL;
-
- fseek(file, 0, SEEK_END);
- int file_size = ftell(file);
- rewind(file);
-
- *size = file_size * repeat;
-
- byte* chars = new byte[*size + 1];
- for (int i = 0; i < file_size;) {
- int read = static_cast<int>(fread(&chars[i], 1, file_size - i, file));
- i += read;
- }
- fclose(file);
-
- for (int i = file_size; i < *size; i++) {
- chars[i] = chars[i - file_size];
- }
- chars[*size] = 0;
-
- return chars;
-}
-
class BaselineScanner {
public:
@@ -92,7 +56,7 @@ class BaselineScanner {
int repeat)
: stream_(NULL) {
int length = 0;
- source_ = ReadFile(fname, isolate, &length, repeat);
+ source_ = ReadFileAndRepeat(fname, &length, repeat);
unicode_cache_ = new UnicodeCache();
scanner_ = new Scanner(unicode_cache_);
switch (encoding) {
@@ -104,6 +68,7 @@ class BaselineScanner {
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(source_),
length / 2));
+ CHECK_NOT_EMPTY_HANDLE(isolate, result);
stream_ =
new GenericStringUtf16CharacterStream(result, 0, result->length());
break;
@@ -111,6 +76,7 @@ class BaselineScanner {
case LATIN1: {
Handle<String> result = isolate->factory()->NewStringFromOneByte(
Vector<const uint8_t>(source_, length));
+ CHECK_NOT_EMPTY_HANDLE(isolate, result);
stream_ =
new GenericStringUtf16CharacterStream(result, 0, result->length());
break;
diff --git a/deps/v8/tools/lexer-shell.gyp b/deps/v8/tools/lexer-shell.gyp
index 8e6ab7a84..623a503a0 100644
--- a/deps/v8/tools/lexer-shell.gyp
+++ b/deps/v8/tools/lexer-shell.gyp
@@ -51,6 +51,29 @@
],
'sources': [
'lexer-shell.cc',
+ 'shell-utils.h',
+ ],
+ },
+ {
+ 'target_name': 'parser-shell',
+ 'type': 'executable',
+ 'dependencies': [
+ '../tools/gyp/v8.gyp:v8',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ],
+ 'include_dirs+': [
+ '../src',
+ ],
+ 'sources': [
+ 'parser-shell.cc',
+ 'shell-utils.h',
],
},
],
diff --git a/deps/v8/tools/merge-to-branch.sh b/deps/v8/tools/merge-to-branch.sh
index ccdae6c9b..4e8a86c83 100755
--- a/deps/v8/tools/merge-to-branch.sh
+++ b/deps/v8/tools/merge-to-branch.sh
@@ -235,7 +235,6 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
echo ">>> Step $CURRENT_STEP: Apply patches for selected revisions."
restore_if_unset "MERGE_TO_BRANCH"
restore_patch_commit_hashes_if_unset "PATCH_COMMIT_HASHES"
- rm -f "$TOUCHED_FILES_FILE"
for HASH in ${PATCH_COMMIT_HASHES[@]} ; do
echo "Applying patch for $HASH to $MERGE_TO_BRANCH..."
git log -1 -p $HASH > "$TEMPORARY_PATCH_FILE"
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
new file mode 100644
index 000000000..4da15fc7e
--- /dev/null
+++ b/deps/v8/tools/parser-shell.cc
@@ -0,0 +1,171 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+#include "v8.h"
+
+#include "api.h"
+#include "compiler.h"
+#include "scanner-character-streams.h"
+#include "shell-utils.h"
+#include "parser.h"
+#include "preparse-data-format.h"
+#include "preparse-data.h"
+#include "preparser.h"
+
+using namespace v8::internal;
+
+enum TestMode {
+ PreParseAndParse,
+ PreParse,
+ Parse
+};
+
+std::pair<TimeDelta, TimeDelta> RunBaselineParser(
+ const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
+ v8::Handle<v8::Context> context, TestMode test_mode) {
+ int length = 0;
+ const byte* source = ReadFileAndRepeat(fname, &length, repeat);
+ v8::Handle<v8::String> source_handle;
+ switch (encoding) {
+ case UTF8: {
+ source_handle = v8::String::NewFromUtf8(
+ isolate, reinterpret_cast<const char*>(source));
+ break;
+ }
+ case UTF16: {
+ source_handle = v8::String::NewFromTwoByte(
+ isolate, reinterpret_cast<const uint16_t*>(source),
+ v8::String::kNormalString, length / 2);
+ break;
+ }
+ case LATIN1: {
+ source_handle = v8::String::NewFromOneByte(isolate, source);
+ break;
+ }
+ }
+ v8::ScriptData* cached_data = NULL;
+ TimeDelta preparse_time, parse_time;
+ if (test_mode == PreParseAndParse || test_mode == PreParse) {
+ ElapsedTimer timer;
+ timer.Start();
+ cached_data = v8::ScriptData::PreCompile(source_handle);
+ preparse_time = timer.Elapsed();
+ if (cached_data == NULL || cached_data->HasError()) {
+ fprintf(stderr, "Preparsing failed\n");
+ return std::make_pair(TimeDelta(), TimeDelta());
+ }
+ }
+ if (test_mode == PreParseAndParse || test_mode == Parse) {
+ Handle<String> str = v8::Utils::OpenHandle(*source_handle);
+ i::Isolate* internal_isolate = str->GetIsolate();
+ Handle<Script> script = internal_isolate->factory()->NewScript(str);
+ CompilationInfoWithZone info(script);
+ info.MarkAsGlobal();
+ i::ScriptDataImpl* cached_data_impl =
+ static_cast<i::ScriptDataImpl*>(cached_data);
+ if (test_mode == PreParseAndParse) {
+ info.SetCachedData(&cached_data_impl,
+ i::CONSUME_CACHED_DATA);
+ }
+ info.SetContext(v8::Utils::OpenHandle(*context));
+ ElapsedTimer timer;
+ timer.Start();
+ // Allow lazy parsing; otherwise the preparse data won't help.
+ bool success = Parser::Parse(&info, true);
+ parse_time = timer.Elapsed();
+ if (!success) {
+ fprintf(stderr, "Parsing failed\n");
+ return std::make_pair(TimeDelta(), TimeDelta());
+ }
+ }
+ return std::make_pair(preparse_time, parse_time);
+}
+
+
+int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ Encoding encoding = LATIN1;
+ TestMode test_mode = PreParseAndParse;
+ std::vector<std::string> fnames;
+ std::string benchmark;
+ int repeat = 1;
+ for (int i = 0; i < argc; ++i) {
+ if (strcmp(argv[i], "--latin1") == 0) {
+ encoding = LATIN1;
+ } else if (strcmp(argv[i], "--utf8") == 0) {
+ encoding = UTF8;
+ } else if (strcmp(argv[i], "--utf16") == 0) {
+ encoding = UTF16;
+ } else if (strcmp(argv[i], "--preparse-and-parse") == 0) {
+ test_mode = PreParseAndParse;
+ } else if (strcmp(argv[i], "--preparse") == 0) {
+ test_mode = PreParse;
+ } else if (strcmp(argv[i], "--parse") == 0) {
+ test_mode = Parse;
+ } else if (strncmp(argv[i], "--benchmark=", 12) == 0) {
+ benchmark = std::string(argv[i]).substr(12);
+ } else if (strncmp(argv[i], "--repeat=", 9) == 0) {
+ std::string repeat_str = std::string(argv[i]).substr(9);
+ repeat = atoi(repeat_str.c_str());
+ } else if (i > 0 && argv[i][0] != '-') {
+ fnames.push_back(std::string(argv[i]));
+ }
+ }
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ ASSERT(!context.IsEmpty());
+ {
+ v8::Context::Scope scope(context);
+ double preparse_total = 0;
+ double parse_total = 0;
+ for (size_t i = 0; i < fnames.size(); i++) {
+ std::pair<TimeDelta, TimeDelta> time = RunBaselineParser(
+ fnames[i].c_str(), encoding, repeat, isolate, context, test_mode);
+ preparse_total += time.first.InMillisecondsF();
+ parse_total += time.second.InMillisecondsF();
+ }
+ if (benchmark.empty()) benchmark = "Baseline";
+ printf("%s(PreParseRunTime): %.f ms\n", benchmark.c_str(),
+ preparse_total);
+ printf("%s(ParseRunTime): %.f ms\n", benchmark.c_str(), parse_total);
+ printf("%s(RunTime): %.f ms\n", benchmark.c_str(),
+ preparse_total + parse_total);
+ }
+ }
+ v8::V8::Dispose();
+ return 0;
+}
diff --git a/deps/v8/tools/push-to-trunk.sh b/deps/v8/tools/push-to-trunk.sh
deleted file mode 100755
index c91cd19f9..000000000
--- a/deps/v8/tools/push-to-trunk.sh
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/bin/bash
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-########## Global variable definitions
-
-BRANCHNAME=prepare-push
-TRUNKBRANCH=trunk-push
-PERSISTFILE_BASENAME=/tmp/v8-push-to-trunk-tempfile
-CHROME_PATH=
-
-########## Function definitions
-
-source $(dirname $BASH_SOURCE)/common-includes.sh
-
-usage() {
-cat << EOF
-usage: $0 OPTIONS
-
-Performs the necessary steps for a V8 push to trunk. Only works for \
-git checkouts.
-
-OPTIONS:
- -h Show this message
- -s Specify the step where to start work. Default: 0.
- -l Manually specify the git commit ID of the last push to trunk.
- -c Specify the path to your Chromium src/ directory to automate the
- V8 roll.
-EOF
-}
-
-########## Option parsing
-
-while getopts ":hs:l:c:" OPTION ; do
- case $OPTION in
- h) usage
- exit 0
- ;;
- s) START_STEP=$OPTARG
- ;;
- l) LASTPUSH=$OPTARG
- ;;
- c) CHROME_PATH=$OPTARG
- ;;
- ?) echo "Illegal option: -$OPTARG"
- usage
- exit 1
- ;;
- esac
-done
-
-
-########## Regular workflow
-
-initial_environment_checks
-
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Preparation"
- common_prepare
- delete_branch $TRUNKBRANCH
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Create a fresh branch."
- git checkout -b $BRANCHNAME svn/bleeding_edge \
- || die "Creating branch $BRANCHNAME failed."
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Detect commit ID of last push to trunk."
- [[ -n "$LASTPUSH" ]] || LASTPUSH=$(git log -1 --format=%H ChangeLog)
- LOOP=1
- while [ $LOOP -eq 1 ] ; do
- # Print assumed commit, circumventing git's pager.
- git log -1 $LASTPUSH | cat
- confirm "Is the commit printed above the last push to trunk?"
- if [ $? -eq 0 ] ; then
- LOOP=0
- else
- LASTPUSH=$(git log -1 --format=%H $LASTPUSH^ ChangeLog)
- fi
- done
- persist "LASTPUSH"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Prepare raw ChangeLog entry."
- # These version numbers are used again later for the trunk commit.
- read_and_persist_version
-
- DATE=$(date +%Y-%m-%d)
- persist "DATE"
- echo "$DATE: Version $MAJOR.$MINOR.$BUILD" > "$CHANGELOG_ENTRY_FILE"
- echo "" >> "$CHANGELOG_ENTRY_FILE"
- COMMITS=$(git log $LASTPUSH..HEAD --format=%H)
- for commit in $COMMITS ; do
- # Get the commit's title line.
- git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
- # Grep for "BUG=xxxx" lines in the commit message and convert them to
- # "(issue xxxx)".
- git log -1 $commit --format="%B" \
- | grep "^BUG=" | grep -v "BUG=$" | grep -v "BUG=none$" \
- | sed -e 's/^/ /' \
- | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
- | sed -e 's/BUG=chromium:\(.*\)$/(Chromium issue \1)/' \
- | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
- >> "$CHANGELOG_ENTRY_FILE"
- # Append the commit's author for reference.
- git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
- echo "" >> "$CHANGELOG_ENTRY_FILE"
- done
- echo " Performance and stability improvements on all platforms." \
- >> "$CHANGELOG_ENTRY_FILE"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Edit ChangeLog entry."
- echo -n "Please press <Return> to have your EDITOR open the ChangeLog entry, \
-then edit its contents to your liking. When you're done, save the file and \
-exit your EDITOR. "
- read ANSWER
- $EDITOR "$CHANGELOG_ENTRY_FILE"
- NEWCHANGELOG=$(mktemp)
- # Eliminate any trailing newlines by going through a shell variable.
- # Also (1) eliminate tabs, (2) fix too little and (3) too much indentation,
- # and (4) eliminate trailing whitespace.
- CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE" \
- | sed -e 's/\t/ /g' \
- | sed -e 's/^ \{1,7\}\([^ ]\)/ \1/g' \
- | sed -e 's/^ \{9,80\}\([^ ]\)/ \1/g' \
- | sed -e 's/ \+$//')
- [[ -n "$CHANGELOGENTRY" ]] || die "Empty ChangeLog entry."
- echo "$CHANGELOGENTRY" > "$NEWCHANGELOG"
- echo "" >> "$NEWCHANGELOG" # Explicitly insert two empty lines.
- echo "" >> "$NEWCHANGELOG"
- cat ChangeLog >> "$NEWCHANGELOG"
- mv "$NEWCHANGELOG" ChangeLog
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Increment version number."
- restore_if_unset "BUILD"
- NEWBUILD=$(($BUILD + 1))
- confirm "Automatically increment BUILD_NUMBER? (Saying 'n' will fire up \
-your EDITOR on $VERSION_FILE so you can make arbitrary changes. When \
-you're done, save the file and exit your EDITOR.)"
- if [ $? -eq 0 ] ; then
- sed -e "/#define BUILD_NUMBER/s/[0-9]*$/$NEWBUILD/" \
- -i "$VERSION_FILE"
- else
- $EDITOR "$VERSION_FILE"
- fi
- read_and_persist_version "NEW"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Commit to local branch."
- restore_version_if_unset "NEW"
- PREPARE_COMMIT_MSG="Prepare push to trunk. \
-Now working on version $NEWMAJOR.$NEWMINOR.$NEWBUILD."
- persist "PREPARE_COMMIT_MSG"
- git commit -a -m "$PREPARE_COMMIT_MSG" \
- || die "'git commit -a' failed."
-fi
-
-upload_step
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Commit to the repository."
- wait_for_lgtm
- # Re-read the ChangeLog entry (to pick up possible changes).
- cat ChangeLog | awk --posix '{
- if ($0 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}:/) {
- if (in_firstblock == 1) {
- exit 0;
- } else {
- in_firstblock = 1;
- }
- };
- print $0;
- }' > "$CHANGELOG_ENTRY_FILE"
- PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
- || die "'git cl dcommit' failed, please try again."
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Fetch straggler commits that sneaked in \
-since this script was started."
- git svn fetch || die "'git svn fetch' failed."
- git checkout svn/bleeding_edge
- restore_if_unset "PREPARE_COMMIT_MSG"
- PREPARE_COMMIT_HASH=$(git log -1 --format=%H --grep="$PREPARE_COMMIT_MSG")
- persist "PREPARE_COMMIT_HASH"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Squash commits into one."
- # Instead of relying on "git rebase -i", we'll just create a diff, because
- # that's easier to automate.
- restore_if_unset "PREPARE_COMMIT_HASH"
- git diff svn/trunk $PREPARE_COMMIT_HASH > "$PATCH_FILE"
- # Convert the ChangeLog entry to commit message format:
- # - remove date
- # - remove indentation
- # - merge paragraphs into single long lines, keeping empty lines between them.
- restore_if_unset "DATE"
- CHANGELOGENTRY=$(cat "$CHANGELOG_ENTRY_FILE")
- echo "$CHANGELOGENTRY" \
- | sed -e "s/^$DATE: //" \
- | sed -e 's/^ *//' \
- | awk '{
- if (need_space == 1) {
- printf(" ");
- };
- printf("%s", $0);
- if ($0 ~ /^$/) {
- printf("\n\n");
- need_space = 0;
- } else {
- need_space = 1;
- }
- }' > "$COMMITMSG_FILE" || die "Commit message editing failed."
- rm -f "$CHANGELOG_ENTRY_FILE"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Create a new branch from trunk."
- git checkout -b $TRUNKBRANCH svn/trunk \
- || die "Checking out a new branch '$TRUNKBRANCH' failed."
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Apply squashed changes."
- rm -f "$TOUCHED_FILES_FILE"
- apply_patch "$PATCH_FILE"
- rm -f "$PATCH_FILE"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Set correct version for trunk."
- restore_version_if_unset
- sed -e "/#define MAJOR_VERSION/s/[0-9]*$/$MAJOR/" \
- -e "/#define MINOR_VERSION/s/[0-9]*$/$MINOR/" \
- -e "/#define BUILD_NUMBER/s/[0-9]*$/$BUILD/" \
- -e "/#define PATCH_LEVEL/s/[0-9]*$/0/" \
- -e "/#define IS_CANDIDATE_VERSION/s/[0-9]*$/0/" \
- -i "$VERSION_FILE" || die "Patching $VERSION_FILE failed."
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Commit to local trunk branch."
- git add "$VERSION_FILE"
- git commit -F "$COMMITMSG_FILE" || die "'git commit' failed."
- rm -f "$COMMITMSG_FILE"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Sanity check."
- confirm "Please check if your local checkout is sane: Inspect $VERSION_FILE, \
-compile, run tests. Do you want to commit this new trunk revision to the \
-repository?"
- [[ $? -eq 0 ]] || die "Execution canceled."
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Commit to SVN."
- git svn dcommit 2>&1 | tee >(grep -E "^Committed r[0-9]+" \
- | sed -e 's/^Committed r\([0-9]\+\)/\1/' \
- > "$TRUNK_REVISION_FILE") \
- || die "'git svn dcommit' failed."
- TRUNK_REVISION=$(cat "$TRUNK_REVISION_FILE")
- # Sometimes grepping for the revision fails. No idea why. If you figure
- # out why it is flaky, please do fix it properly.
- if [ -z "$TRUNK_REVISION" ] ; then
- echo "Sorry, grepping for the SVN revision failed. Please look for it in \
-the last command's output above and provide it manually (just the number, \
-without the leading \"r\")."
- while [ -z "$TRUNK_REVISION" ] ; do
- echo -n "> "
- read TRUNK_REVISION
- done
- fi
- persist "TRUNK_REVISION"
- rm -f "$TRUNK_REVISION_FILE"
-fi
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Tag the new revision."
- restore_version_if_unset
- git svn tag $MAJOR.$MINOR.$BUILD -m "Tagging version $MAJOR.$MINOR.$BUILD" \
- || die "'git svn tag' failed."
-fi
-
-if [ -z "$CHROME_PATH" ] ; then
- echo ">>> (asking for Chromium checkout)"
- echo -n "Do you have a \"NewGit\" Chromium checkout and want this script \
-to automate creation of the roll CL? If yes, enter the path to (and including) \
-the \"src\" directory here, otherwise just press <Return>: "
- read CHROME_PATH
-fi
-
-if [ -n "$CHROME_PATH" ] ; then
-
- let CURRENT_STEP+=1
- if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Switch to Chromium checkout."
- V8_PATH=$(pwd)
- persist "V8_PATH"
- cd "$CHROME_PATH"
- initial_environment_checks
- # Check for a clean workdir.
- [[ -z "$(git status -s -uno)" ]] \
- || die "Workspace is not clean. Please commit or undo your changes."
- # Assert that the DEPS file is there.
- [[ -w "DEPS" ]] || die "DEPS file not present or not writable; \
-current directory is: $(pwd)."
- fi
-
- let CURRENT_STEP+=1
- if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Update the checkout and create a new branch."
- git checkout master || die "'git checkout master' failed."
- git pull || die "'git pull' failed, please try again."
- restore_if_unset "TRUNK_REVISION"
- git checkout -b "v8-roll-$TRUNK_REVISION" \
- || die "Failed to checkout a new branch."
- fi
-
- let CURRENT_STEP+=1
- if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Create and upload CL."
- # Patch DEPS file.
- sed -r -e "/\"v8_revision\": /s/\"[0-9]+\"/\"$TRUNK_REVISION\"/" \
- -i DEPS
- restore_version_if_unset
- echo -n "Please enter the email address of a reviewer for the roll CL: "
- read REVIEWER
- git commit -am "Update V8 to version $MAJOR.$MINOR.$BUILD.
-
-TBR=$REVIEWER" || die "'git commit' failed."
- git cl upload --send-mail \
- || die "'git cl upload' failed, please try again."
- echo "CL uploaded."
- fi
-
- let CURRENT_STEP+=1
- if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Returning to V8 checkout."
- restore_if_unset "V8_PATH"
- cd "$V8_PATH"
- fi
-fi # if [ -n "$CHROME_PATH" ]
-
-let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
- echo ">>> Step $CURRENT_STEP: Done!"
- restore_version_if_unset
- restore_if_unset "TRUNK_REVISION"
- if [ -n "$CHROME_PATH" ] ; then
- echo "Congratulations, you have successfully created the trunk revision \
-$MAJOR.$MINOR.$BUILD and rolled it into Chromium. Please don't forget to \
-update the v8rel spreadsheet:"
- else
- echo "Congratulations, you have successfully created the trunk revision \
-$MAJOR.$MINOR.$BUILD. Please don't forget to roll this new version into \
-Chromium, and to update the v8rel spreadsheet:"
- fi
- echo -e "$MAJOR.$MINOR.$BUILD\ttrunk\t$TRUNK_REVISION"
- common_cleanup
- [[ "$TRUNKBRANCH" != "$CURRENT_BRANCH" ]] && git branch -D $TRUNKBRANCH
-fi
diff --git a/deps/v8/tools/push-to-trunk/auto_push.py b/deps/v8/tools/push-to-trunk/auto_push.py
new file mode 100755
index 000000000..9a43c3f5b
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/auto_push.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import argparse
+import json
+import os
+import re
+import sys
+import urllib
+
+from common_includes import *
+import push_to_trunk
+
+SETTINGS_LOCATION = "SETTINGS_LOCATION"
+
+CONFIG = {
+ PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
+ DOT_GIT_LOCATION: ".git",
+ SETTINGS_LOCATION: "~/.auto-roll",
+}
+
+PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.InitialEnvironmentChecks()
+ self.CommonPrepare()
+
+
+class CheckAutoPushSettings(Step):
+ MESSAGE = "Checking settings file."
+
+ def RunStep(self):
+ settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
+ if os.path.exists(settings_file):
+ settings_dict = json.loads(FileToText(settings_file))
+ if settings_dict.get("enable_auto_roll") is False:
+ self.Die("Push to trunk disabled by auto-roll settings file: %s"
+ % settings_file)
+
+
+class CheckTreeStatus(Step):
+ MESSAGE = "Checking v8 tree status message."
+
+ def RunStep(self):
+ status_url = "https://v8-status.appspot.com/current?format=json"
+ status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
+ self["tree_message"] = json.loads(status_json)["message"]
+ if re.search(r"nopush|no push", self["tree_message"], flags=re.I):
+ self.Die("Push to trunk disabled by tree state: %s"
+ % self["tree_message"])
+
+
+class FetchLKGR(Step):
+ MESSAGE = "Fetching V8 LKGR."
+
+ def RunStep(self):
+ lkgr_url = "https://v8-status.appspot.com/lkgr"
+ # Retry several times since app engine might have issues.
+ self["lkgr"] = self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300])
+
+
+class CheckLastPush(Step):
+ MESSAGE = "Checking last V8 push to trunk."
+
+ def RunStep(self):
+ last_push = self.FindLastTrunkPush()
+
+ # Retrieve the bleeding edge revision of the last push from the text in
+ # the push commit message.
+ last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
+ last_push_be = PUSH_MESSAGE_RE.match(last_push_title).group(1)
+
+ if not last_push_be: # pragma: no cover
+ self.Die("Could not retrieve bleeding edge revision for trunk push %s"
+ % last_push)
+
+ # TODO(machenbach): This metric counts all revisions. It could be
+ # improved by counting only the revisions on bleeding_edge.
+ if int(self["lkgr"]) - int(last_push_be) < 10: # pragma: no cover
+ # This makes sure the script doesn't push twice in a row when the cron
+ # job retries several times.
+ self.Die("Last push too recently: %s" % last_push_be)
+
+
+class PushToTrunk(Step):
+ MESSAGE = "Pushing to trunk if specified."
+
+ def RunStep(self):
+ print "Pushing lkgr %s to trunk." % self["lkgr"]
+
+ # TODO(machenbach): Update the script before calling it.
+ if self._options.push:
+ P = push_to_trunk.PushToTrunk
+ self._side_effect_handler.Call(
+ P(push_to_trunk.CONFIG, self._side_effect_handler).Run,
+ ["--author", self._options.author,
+ "--reviewer", self._options.reviewer,
+ "--revision", self["lkgr"],
+ "--force"])
+
+
+class AutoPush(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ parser.add_argument("-p", "--push",
+ help="Push to trunk. Dry run if unspecified.",
+ default=False, action="store_true")
+
+ def _ProcessOptions(self, options):
+ if not options.author or not options.reviewer: # pragma: no cover
+ print "You need to specify author and reviewer."
+ return False
+ options.requires_editor = False
+ return True
+
+ def _Steps(self):
+ return [
+ Preparation,
+ CheckAutoPushSettings,
+ CheckTreeStatus,
+ FetchLKGR,
+ CheckLastPush,
+ PushToTrunk,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(AutoPush(CONFIG).Run())
diff --git a/deps/v8/tools/push-to-trunk/auto_roll.py b/deps/v8/tools/push-to-trunk/auto_roll.py
deleted file mode 100755
index ac2067c61..000000000
--- a/deps/v8/tools/push-to-trunk/auto_roll.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-import optparse
-import os
-import re
-import sys
-import urllib
-
-from common_includes import *
-import push_to_trunk
-from push_to_trunk import PushToTrunkOptions
-from push_to_trunk import RunPushToTrunk
-
-SETTINGS_LOCATION = "SETTINGS_LOCATION"
-
-CONFIG = {
- PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
- DOT_GIT_LOCATION: ".git",
- SETTINGS_LOCATION: "~/.auto-roll",
-}
-
-
-class AutoRollOptions(CommonOptions):
- def __init__(self, options):
- super(AutoRollOptions, self).__init__(options)
- self.requires_editor = False
- self.status_password = options.status_password
- self.r = options.r
- self.c = options.c
- self.push = getattr(options, 'push', False)
-
-
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- self.InitialEnvironmentChecks()
- self.CommonPrepare()
-
-
-class CheckAutoRollSettings(Step):
- MESSAGE = "Checking settings file."
-
- def RunStep(self):
- settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
- if os.path.exists(settings_file):
- settings_dict = json.loads(FileToText(settings_file))
- if settings_dict.get("enable_auto_roll") is False:
- self.Die("Push to trunk disabled by auto-roll settings file: %s"
- % settings_file)
-
-
-class CheckTreeStatus(Step):
- MESSAGE = "Checking v8 tree status message."
-
- def RunStep(self):
- status_url = "https://v8-status.appspot.com/current?format=json"
- status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
- message = json.loads(status_json)["message"]
- if re.search(r"nopush|no push", message, flags=re.I):
- self.Die("Push to trunk disabled by tree state: %s" % message)
- self.Persist("tree_message", message)
-
-
-class FetchLatestRevision(Step):
- MESSAGE = "Fetching latest V8 revision."
-
- def RunStep(self):
- log = self.Git("svn log -1 --oneline").strip()
- match = re.match(r"^r(\d+) ", log)
- if not match:
- self.Die("Could not extract current svn revision from log.")
- self.Persist("latest", match.group(1))
-
-
-class CheckLastPush(Step):
- MESSAGE = "Checking last V8 push to trunk."
-
- def RunStep(self):
- self.RestoreIfUnset("latest")
- log = self.Git("svn log -1 --oneline ChangeLog").strip()
- match = re.match(r"^r(\d+) \| Prepare push to trunk", log)
- if match:
- latest = int(self._state["latest"])
- last_push = int(match.group(1))
- # TODO(machebach): This metric counts all revisions. It could be
- # improved by counting only the revisions on bleeding_edge.
- if latest - last_push < 10:
- # This makes sure the script doesn't push twice in a row when the cron
- # job retries several times.
- self.Die("Last push too recently: %d" % last_push)
-
-
-class FetchLKGR(Step):
- MESSAGE = "Fetching V8 LKGR."
-
- def RunStep(self):
- lkgr_url = "https://v8-status.appspot.com/lkgr"
- # Retry several times since app engine might have issues.
- self.Persist("lkgr", self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300]))
-
-
-class PushToTrunk(Step):
- MESSAGE = "Pushing to trunk if possible."
-
- def PushTreeStatus(self, message):
- if not self._options.status_password:
- print "Skipping tree status update without password file."
- return
- params = {
- "message": message,
- "username": "v8-auto-roll@chromium.org",
- "password": FileToText(self._options.status_password).strip(),
- }
- params = urllib.urlencode(params)
- print "Pushing tree status: '%s'" % message
- self.ReadURL("https://v8-status.appspot.com/status", params,
- wait_plan=[5, 20])
-
- def RunStep(self):
- self.RestoreIfUnset("latest")
- self.RestoreIfUnset("lkgr")
- self.RestoreIfUnset("tree_message")
- latest = int(self._state["latest"])
- lkgr = int(self._state["lkgr"])
- if latest == lkgr:
- print "ToT (r%d) is clean. Pushing to trunk." % latest
- self.PushTreeStatus("Tree is closed (preparing to push)")
-
- # TODO(machenbach): Call push to trunk script.
- # TODO(machenbach): Update the script before calling it.
- try:
- if self._options.push:
- self._side_effect_handler.Call(
- RunPushToTrunk,
- push_to_trunk.CONFIG,
- PushToTrunkOptions.MakeForcedOptions(self._options.r,
- self._options.c),
- self._side_effect_handler)
- finally:
- self.PushTreeStatus(self._state["tree_message"])
- else:
- print("ToT (r%d) is ahead of the LKGR (r%d). Skipping push to trunk."
- % (latest, lkgr))
-
-
-def RunAutoRoll(config,
- options,
- side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
- step_classes = [
- Preparation,
- CheckAutoRollSettings,
- CheckTreeStatus,
- FetchLatestRevision,
- CheckLastPush,
- FetchLKGR,
- PushToTrunk,
- ]
- RunScript(step_classes, config, options, side_effect_handler)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("-c", "--chromium", dest="c",
- help=("Specify the path to your Chromium src/ "
- "directory to automate the V8 roll."))
- result.add_option("-p", "--push",
- help="Push to trunk if possible. Dry run if unspecified.",
- default=False, action="store_true")
- result.add_option("-r", "--reviewer", dest="r",
- help=("Specify the account name to be used for reviews."))
- result.add_option("-s", "--step", dest="s",
- help="Specify the step where to start work. Default: 0.",
- default=0, type="int")
- result.add_option("--status-password",
- help="A file with the password to the status app.")
- return result
-
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not options.c or not options.r:
- print "You need to specify the chromium src location and a reviewer."
- parser.print_help()
- return 1
- RunAutoRoll(CONFIG, AutoRollOptions(options))
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/deps/v8/tools/push-to-trunk/chromium_roll.py b/deps/v8/tools/push-to-trunk/chromium_roll.py
new file mode 100755
index 000000000..ef9b8bf38
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/chromium_roll.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import sys
+
+from common_includes import *
+
+DEPS_FILE = "DEPS_FILE"
+CHROMIUM = "CHROMIUM"
+
+CONFIG = {
+ PERSISTFILE_BASENAME: "/tmp/v8-chromium-roll-tempfile",
+ DOT_GIT_LOCATION: ".git",
+ DEPS_FILE: "DEPS",
+}
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.CommonPrepare()
+
+
+class DetectLastPush(Step):
+ MESSAGE = "Detect commit ID of last push to trunk."
+
+ def RunStep(self):
+ self["last_push"] = self._options.last_push or self.FindLastTrunkPush()
+ self["trunk_revision"] = self.GitSVNFindSVNRev(self["last_push"])
+ self["push_title"] = self.GitLog(n=1, format="%s",
+ git_hash=self["last_push"])
+
+
+class CheckChromium(Step):
+ MESSAGE = "Ask for chromium checkout."
+
+ def Run(self):
+ self["chrome_path"] = self._options.chromium
+ while not self["chrome_path"]:
+ self.DieNoManualMode("Please specify the path to a Chromium checkout in "
+ "forced mode.")
+ print ("Please specify the path to the chromium \"src\" directory: "),
+ self["chrome_path"] = self.ReadLine()
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ self["v8_path"] = os.getcwd()
+ os.chdir(self["chrome_path"])
+ self.InitialEnvironmentChecks()
+ # Check for a clean workdir.
+ if not self.GitIsWorkdirClean(): # pragma: no cover
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(self.Config(DEPS_FILE)): # pragma: no cover
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self["chrome_path"])
+ self.GitCheckout("master")
+ self.GitPull()
+ self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"])
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self["chrome_path"])
+
+ # Patch DEPS file.
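+    # The lookbehind/lookahead keep the surrounding quotes and the
+    # "v8_revision" key intact; only the numeric revision between them is
+    # replaced.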
+ deps = FileToText(self.Config(DEPS_FILE))
+ deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
+ self["trunk_revision"],
+ deps)
+ TextToFile(deps, self.Config(DEPS_FILE))
+
+ if self._options.reviewer:
+ print "Using account %s for review." % self._options.reviewer
+ rev = self._options.reviewer
+ else:
+ print "Please enter the email address of a reviewer for the roll CL: ",
+ self.DieNoManualMode("A reviewer must be specified in forced mode.")
+ rev = self.ReadLine()
+
+ commit_title = "Update V8 to %s." % self["push_title"].lower()
+ self.GitCommit("%s\n\nTBR=%s" % (commit_title, rev))
+ self.GitUpload(author=self._options.author,
+ force=self._options.force_upload)
+ print "CL uploaded."
+
+
+class SwitchV8(Step):
+ MESSAGE = "Returning to V8 checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self["v8_path"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ print("Congratulations, you have successfully rolled the push r%s it into "
+ "Chromium. Please don't forget to update the v8rel spreadsheet."
+ % self["trunk_revision"])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+
+
+class ChromiumRoll(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-f", "--force",
+ help="Don't prompt the user.",
+ default=False, action="store_true")
+ group.add_argument("-m", "--manual",
+ help="Prompt the user at every important step.",
+ default=False, action="store_true")
+ parser.add_argument("-c", "--chromium",
+ help=("The path to your Chromium src/ "
+ "directory to automate the V8 roll."))
+ parser.add_argument("-l", "--last-push",
+ help="The git commit ID of the last push to trunk.")
+
+ def _ProcessOptions(self, options): # pragma: no cover
+ if not options.manual and not options.reviewer:
+ print "A reviewer (-r) is required in (semi-)automatic mode."
+ return False
+ if not options.manual and not options.chromium:
+ print "A chromium checkout (-c) is required in (semi-)automatic mode."
+ return False
+ if not options.manual and not options.author:
+ print "Specify your chromium.org email with -a in (semi-)automatic mode."
+ return False
+
+ options.tbr_commit = not options.manual
+ return True
+
+ def _Steps(self):
+ return [
+ Preparation,
+ DetectLastPush,
+ CheckChromium,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ SwitchV8,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(ChromiumRoll(CONFIG).Run())
diff --git a/deps/v8/tools/push-to-trunk/common_includes.py b/deps/v8/tools/push-to-trunk/common_includes.py
index 410be8bbc..39b689134 100644
--- a/deps/v8/tools/push-to-trunk/common_includes.py
+++ b/deps/v8/tools/push-to-trunk/common_includes.py
@@ -26,7 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import argparse
import datetime
+import json
import os
import re
import subprocess
@@ -35,6 +37,8 @@ import textwrap
import time
import urllib2
+from git_recipes import GitRecipesMixin
+
PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
TEMP_BRANCH = "TEMP_BRANCH"
BRANCHNAME = "BRANCHNAME"
@@ -80,14 +84,6 @@ def Fill80(line):
subsequent_indent=" ")
-def GetLastChangeLogEntries(change_log_file):
- result = []
- for line in LinesInFile(change_log_file):
- if re.search(r"^\d{4}-\d{2}-\d{2}:", line) and result: break
- result.append(line)
- return "".join(result)
-
-
def MakeComment(text):
return MSub(r"^( ?)", "#", text)
@@ -188,7 +184,7 @@ def Command(cmd, args="", prefix="", pipe=True):
# Wrapper for side effects.
-class SideEffectHandler(object):
+class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
return fun(*args, **kwargs)
@@ -219,17 +215,11 @@ class NoRetryException(Exception):
pass
-class CommonOptions(object):
- def __init__(self, options, manual=True):
- self.requires_editor = True
- self.wait_for_lgtm = True
- self.s = options.s
- self.force_readline_defaults = not manual
- self.force_upload = not manual
- self.manual = manual
+class GitFailedException(Exception):
+ pass
-class Step(object):
+class Step(GitRecipesMixin):
def __init__(self, text, requires, number, config, state, options, handler):
self._text = text
self._requires = requires
@@ -242,20 +232,37 @@ class Step(object):
assert self._config is not None
assert self._state is not None
assert self._side_effect_handler is not None
- assert isinstance(options, CommonOptions)
+
+ def __getitem__(self, key):
+ # Convenience method to allow direct [] access on step classes for
+    # manipulating the backing state dict.
+ return self._state[key]
+
+ def __setitem__(self, key, value):
+ # Convenience method to allow direct [] access on step classes for
+ # manipulating the backed state dict.
+ self._state[key] = value
def Config(self, key):
return self._config[key]
def Run(self):
- if self._requires:
- self.RestoreIfUnset(self._requires)
- if not self._state[self._requires]:
- return
+ # Restore state.
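+    # The shared state dict is serialized to a JSON file after every step,
+    # so an aborted run can be resumed later via the --step option.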
+ state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+ if not self._state and os.path.exists(state_file):
+ self._state.update(json.loads(FileToText(state_file)))
+
+ # Skip step if requirement is not met.
+ if self._requires and not self._state.get(self._requires):
+ return
+
print ">>> Step %d: %s" % (self._number, self._text)
self.RunStep()
- def RunStep(self):
+ # Persist state.
+ TextToFile(json.dumps(self._state), state_file)
+
+ def RunStep(self): # pragma: no cover
raise NotImplementedError
def Retry(self, cb, retry_on=None, wait_plan=None):
@@ -280,7 +287,7 @@ class Step(object):
except Exception:
got_exception = True
if got_exception or retry_on(result):
- if not wait_plan:
+ if not wait_plan: # pragma: no cover
raise Exception("Retried too often. Giving up.")
wait_time = wait_plan.pop()
print "Waiting for %f seconds." % wait_time
@@ -299,6 +306,13 @@ class Step(object):
def Git(self, args="", prefix="", pipe=True, retry_on=None):
cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
+ result = self.Retry(cmd, retry_on, [5, 30])
+ if result is None:
+ raise GitFailedException("'git %s' failed." % args)
+ return result
+
+ def SVN(self, args="", prefix="", pipe=True, retry_on=None):
+ cmd = lambda: self._side_effect_handler.Command("svn", args, prefix, pipe)
return self.Retry(cmd, retry_on, [5, 30])
def Editor(self, args):
@@ -321,7 +335,7 @@ class Step(object):
raise Exception(msg)
def DieNoManualMode(self, msg=""):
- if not self._options.manual:
+ if not self._options.manual: # pragma: no cover
msg = msg or "Only available in manual mode."
self.Die(msg)
@@ -331,77 +345,52 @@ class Step(object):
return answer == "" or answer == "Y" or answer == "y"
def DeleteBranch(self, name):
- git_result = self.Git("branch").strip()
- for line in git_result.splitlines():
+ for line in self.GitBranch().splitlines():
if re.match(r".*\s+%s$" % name, line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
- if self.Git("branch -D %s" % name) is None:
- self.Die("Deleting branch '%s' failed." % name)
+ self.GitDeleteBranch(name)
print "Branch %s deleted." % name
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
- def Persist(self, var, value):
- value = value or "__EMPTY__"
- TextToFile(value, "%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
-
- def Restore(self, var):
- value = FileToText("%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
- value = value or self.Die("Variable '%s' could not be restored." % var)
- return "" if value == "__EMPTY__" else value
-
- def RestoreIfUnset(self, var_name):
- if self._state.get(var_name) is None:
- self._state[var_name] = self.Restore(var_name)
-
def InitialEnvironmentChecks(self):
# Cancel if this is not a git checkout.
- if not os.path.exists(self._config[DOT_GIT_LOCATION]):
+ if not os.path.exists(self._config[DOT_GIT_LOCATION]): # pragma: no cover
self.Die("This is not a git checkout, this script won't work for you.")
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
- Command("which", os.environ["EDITOR"]) is None)):
+ Command("which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
# Check for a clean workdir.
- if self.Git("status -s -uno").strip() != "":
+ if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Persist current branch.
- current_branch = ""
- git_result = self.Git("status -s -b -uno").strip()
- for line in git_result.splitlines():
- match = re.match(r"^## (.+)", line)
- if match:
- current_branch = match.group(1)
- break
- self.Persist("current_branch", current_branch)
+ self["current_branch"] = self.GitCurrentBranch()
# Fetch unfetched revisions.
- if self.Git("svn fetch") is None:
- self.Die("'git svn fetch' failed.")
+ self.GitSVNFetch()
def PrepareBranch(self):
# Get ahold of a safe temporary branch and check it out.
- self.RestoreIfUnset("current_branch")
- if self._state["current_branch"] != self._config[TEMP_BRANCH]:
+ if self["current_branch"] != self._config[TEMP_BRANCH]:
self.DeleteBranch(self._config[TEMP_BRANCH])
- self.Git("checkout -b %s" % self._config[TEMP_BRANCH])
+ self.GitCreateBranch(self._config[TEMP_BRANCH])
# Delete the branch that will be created later if it exists already.
self.DeleteBranch(self._config[BRANCHNAME])
def CommonCleanup(self):
- self.RestoreIfUnset("current_branch")
- self.Git("checkout -f %s" % self._state["current_branch"])
- if self._config[TEMP_BRANCH] != self._state["current_branch"]:
- self.Git("branch -D %s" % self._config[TEMP_BRANCH])
- if self._config[BRANCHNAME] != self._state["current_branch"]:
- self.Git("branch -D %s" % self._config[BRANCHNAME])
+ self.GitCheckout(self["current_branch"])
+ if self._config[TEMP_BRANCH] != self["current_branch"]:
+ self.GitDeleteBranch(self._config[TEMP_BRANCH])
+ if self._config[BRANCHNAME] != self["current_branch"]:
+ self.GitDeleteBranch(self._config[BRANCHNAME])
# Clean up all temporary files.
Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
@@ -411,8 +400,7 @@ class Step(object):
match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
if match:
value = match.group(1)
- self.Persist("%s%s" % (prefix, var_name), value)
- self._state["%s%s" % (prefix, var_name)] = value
+ self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(self._config[VERSION_FILE]):
for (var_name, def_name) in [("major", "MAJOR_VERSION"),
("minor", "MINOR_VERSION"),
@@ -420,10 +408,6 @@ class Step(object):
("patch", "PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
- def RestoreVersionIfUnset(self, prefix=""):
- for v in ["major", "minor", "build", "patch"]:
- self.RestoreIfUnset("%s%s" % (prefix, v))
-
def WaitForLGTM(self):
print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
"your change. (If you need to iterate on the patch or double check "
@@ -451,29 +435,31 @@ class Step(object):
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
- def ApplyPatch(self, patch_file, reverse_patch=""):
- args = "apply --index --reject %s \"%s\"" % (reverse_patch, patch_file)
- if self.Git(args) is None:
+ def ApplyPatch(self, patch_file, revert=False):
+ try:
+ self.GitApplyPatch(patch_file, revert)
+ except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
+ def FindLastTrunkPush(self, parent_hash=""):
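+    # Matches trunk push commit subjects such as
+    # "Version 3.25.30 (based on bleeding_edge revision r12345)". POSIX
+    # character classes are used because the pattern is handed straight to
+    # "git log --grep".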
+ push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based"
+ branch = "" if parent_hash else "svn/trunk"
+ return self.GitLog(n=1, format="%H", grep=push_pattern,
+ parent_hash=parent_hash, branch=branch)
+
class UploadStep(Step):
MESSAGE = "Upload for code review."
def RunStep(self):
- if self._options.r:
- print "Using account %s for review." % self._options.r
- reviewer = self._options.r
+ if self._options.reviewer:
+ print "Using account %s for review." % self._options.reviewer
+ reviewer = self._options.reviewer
else:
print "Please enter the email address of a V8 reviewer for your patch: ",
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
- force_flag = " -f" if self._options.force_upload else ""
- args = "cl upload -r \"%s\" --send-mail%s" % (reviewer, force_flag)
- # TODO(machenbach): Check output in forced mode. Verify that all required
- # base files were uploaded, if not retry.
- if self.Git(args, pipe=False) is None:
- self.Die("'git cl upload' failed, please try again.")
+ self.GitUpload(reviewer, self._options.author, self._options.force_upload)
def MakeStep(step_class=Step, number=0, state=None, config=None,
@@ -496,15 +482,81 @@ def MakeStep(step_class=Step, number=0, state=None, config=None,
handler=side_effect_handler)
-def RunScript(step_classes,
- config,
- options,
- side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
- state = {}
- steps = []
- for (number, step_class) in enumerate(step_classes):
- steps.append(MakeStep(step_class, number, state, config,
- options, side_effect_handler))
+class ScriptsBase(object):
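+  """Base class for the scripts: handles option parsing and step running."""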
+ # TODO(machenbach): Move static config here.
+ def __init__(self, config, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
+ state=None):
+ self._config = config
+ self._side_effect_handler = side_effect_handler
+ self._state = state if state is not None else {}
+
+ def _Description(self):
+ return None
+
+ def _PrepareOptions(self, parser):
+ pass
+
+ def _ProcessOptions(self, options):
+ return True
+
+ def _Steps(self): # pragma: no cover
+ raise Exception("Not implemented.")
- for step in steps[options.s:]:
- step.Run()
+ def MakeOptions(self, args=None):
+ parser = argparse.ArgumentParser(description=self._Description())
+ parser.add_argument("-a", "--author", default="",
+ help="The author email used for rietveld.")
+ parser.add_argument("-r", "--reviewer", default="",
+ help="The account name to be used for reviews.")
+ parser.add_argument("-s", "--step",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type=int)
+
+ self._PrepareOptions(parser)
+
+ if args is None: # pragma: no cover
+ options = parser.parse_args()
+ else:
+ options = parser.parse_args(args)
+
+ # Process common options.
+ if options.step < 0: # pragma: no cover
+ print "Bad step number %d" % options.step
+ parser.print_help()
+ return None
+
+ # Defaults for options, common to all scripts.
+ options.manual = getattr(options, "manual", True)
+ options.force = getattr(options, "force", False)
+
+ # Derived options.
+ options.requires_editor = not options.force
+ options.wait_for_lgtm = not options.force
+ options.force_readline_defaults = not options.manual
+ options.force_upload = not options.manual
+
+ # Process script specific options.
+ if not self._ProcessOptions(options):
+ parser.print_help()
+ return None
+ return options
+
+ def RunSteps(self, step_classes, args=None):
+ options = self.MakeOptions(args)
+ if not options:
+ return 1
+
+ state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+ if options.step == 0 and os.path.exists(state_file):
+ os.remove(state_file)
+
+ steps = []
+ for (number, step_class) in enumerate(step_classes):
+ steps.append(MakeStep(step_class, number, self._state, self._config,
+ options, self._side_effect_handler))
+ for step in steps[options.step:]:
+ step.Run()
+ return 0
+
+ def Run(self, args=None):
+ return self.RunSteps(self._Steps(), args)
diff --git a/deps/v8/tools/push-to-trunk/git_recipes.py b/deps/v8/tools/push-to-trunk/git_recipes.py
new file mode 100644
index 000000000..8e84d4533
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/git_recipes.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+def Strip(f):
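+  """Decorator that strips surrounding whitespace from f's return value."""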
+ def new_f(*args, **kwargs):
+ return f(*args, **kwargs).strip()
+ return new_f
+
+
+def MakeArgs(l):
+ """['-a', '', 'abc', ''] -> '-a abc'"""
+ return " ".join(filter(None, l))
+
+
+def Quoted(s):
+ return "\"%s\"" % s
+
+
+class GitRecipesMixin(object):
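+  """Convenience wrappers around the raw self.Git() command of Step."""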
+ def GitIsWorkdirClean(self):
+ return self.Git("status -s -uno").strip() == ""
+
+ @Strip
+ def GitBranch(self):
+ return self.Git("branch")
+
+ def GitCreateBranch(self, name, branch=""):
+ assert name
+ self.Git(MakeArgs(["checkout -b", name, branch]))
+
+ def GitDeleteBranch(self, name):
+ assert name
+ self.Git(MakeArgs(["branch -D", name]))
+
+ def GitCheckout(self, name):
+ assert name
+ self.Git(MakeArgs(["checkout -f", name]))
+
+ def GitCheckoutFile(self, name, branch_or_hash):
+ assert name
+ assert branch_or_hash
+ self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]))
+
+ @Strip
+ def GitCurrentBranch(self):
+ for line in self.Git("status -s -b -uno").strip().splitlines():
+ match = re.match(r"^## (.+)", line)
+ if match: return match.group(1)
+ raise Exception("Couldn't find curent branch.") # pragma: no cover
+
+ @Strip
+ def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
+ branch="", reverse=False):
+ assert not (git_hash and parent_hash)
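+    # Passing git_hash starts the log at that commit; parent_hash starts it
+    # at the commit's parent (hence the trailing "^"). Only one of the two
+    # may be given.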
+ args = ["log"]
+ if n > 0:
+ args.append("-%d" % n)
+ if format:
+ args.append("--format=%s" % format)
+ if grep:
+ args.append("--grep=\"%s\"" % grep)
+ if reverse:
+ args.append("--reverse")
+ if git_hash:
+ args.append(git_hash)
+ if parent_hash:
+ args.append("%s^" % parent_hash)
+ args.append(branch)
+ return self.Git(MakeArgs(args))
+
+ def GitGetPatch(self, git_hash):
+ assert git_hash
+ return self.Git(MakeArgs(["log", "-1", "-p", git_hash]))
+
+ def GitAdd(self, name):
+ assert name
+ self.Git(MakeArgs(["add", Quoted(name)]))
+
+ def GitApplyPatch(self, patch_file, reverse=False):
+ assert patch_file
+ args = ["apply --index --reject"]
+ if reverse:
+ args.append("--reverse")
+ args.append(Quoted(patch_file))
+ self.Git(MakeArgs(args))
+
+ def GitUpload(self, reviewer="", author="", force=False):
+ args = ["cl upload --send-mail"]
+ if author:
+ args += ["--email", Quoted(author)]
+ if reviewer:
+ args += ["-r", Quoted(reviewer)]
+ if force:
+ args.append("-f")
+ # TODO(machenbach): Check output in forced mode. Verify that all required
+ # base files were uploaded, if not retry.
+ self.Git(MakeArgs(args), pipe=False)
+
+ def GitCommit(self, message="", file_name=""):
+ assert message or file_name
+ args = ["commit"]
+ if file_name:
+ args += ["-aF", Quoted(file_name)]
+ if message:
+ args += ["-am", Quoted(message)]
+ self.Git(MakeArgs(args))
+
+ def GitPresubmit(self):
+ self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"")
+
+ def GitDCommit(self):
+ self.Git("cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None)
+
+ def GitDiff(self, loc1, loc2):
+ return self.Git(MakeArgs(["diff", loc1, loc2]))
+
+ def GitPull(self):
+ self.Git("pull")
+
+ def GitSVNFetch(self):
+ self.Git("svn fetch")
+
+ @Strip
+ def GitSVNLog(self):
+ return self.Git("svn log -1 --oneline")
+
+ @Strip
+ def GitSVNFindGitHash(self, revision, branch=""):
+ assert revision
+ return self.Git(MakeArgs(["svn find-rev", "r%s" % revision, branch]))
+
+ @Strip
+ def GitSVNFindSVNRev(self, git_hash, branch=""):
+ return self.Git(MakeArgs(["svn find-rev", git_hash, branch]))
+
+ def GitSVNDCommit(self):
+ return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
+
+ def GitSVNTag(self, version):
+ self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
+ retry_on=lambda x: x is None)
diff --git a/deps/v8/tools/push-to-trunk/merge_to_branch.py b/deps/v8/tools/push-to-trunk/merge_to_branch.py
new file mode 100755
index 000000000..f0acd143e
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/merge_to_branch.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import argparse
+from collections import OrderedDict
+import sys
+
+from common_includes import *
+
+ALREADY_MERGING_SENTINEL_FILE = "ALREADY_MERGING_SENTINEL_FILE"
+COMMIT_HASHES_FILE = "COMMIT_HASHES_FILE"
+TEMPORARY_PATCH_FILE = "TEMPORARY_PATCH_FILE"
+
+CONFIG = {
+ BRANCHNAME: "prepare-merge",
+ PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile",
+ ALREADY_MERGING_SENTINEL_FILE:
+ "/tmp/v8-merge-to-branch-tempfile-already-merging",
+ TEMP_BRANCH: "prepare-merge-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: ".git",
+ VERSION_FILE: "src/version.cc",
+ TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+ COMMITMSG_FILE: "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ COMMIT_HASHES_FILE: "/tmp/v8-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
+}
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ if os.path.exists(self.Config(ALREADY_MERGING_SENTINEL_FILE)):
+ if self._options.force:
+ os.remove(self.Config(ALREADY_MERGING_SENTINEL_FILE))
+ elif self._options.step == 0: # pragma: no cover
+ self.Die("A merge is already in progress")
+ open(self.Config(ALREADY_MERGING_SENTINEL_FILE), "a").close()
+
+ self.InitialEnvironmentChecks()
+ if self._options.revert_bleeding_edge:
+ self["merge_to_branch"] = "bleeding_edge"
+ elif self._options.branch:
+ self["merge_to_branch"] = self._options.branch
+ else: # pragma: no cover
+ self.Die("Please specify a branch to merge to")
+
+ self.CommonPrepare()
+ self.PrepareBranch()
+
+
+class CreateBranch(Step):
+ MESSAGE = "Create a fresh branch for the patch."
+
+ def RunStep(self):
+ self.GitCreateBranch(self.Config(BRANCHNAME),
+ "svn/%s" % self["merge_to_branch"])
+
+
+class SearchArchitecturePorts(Step):
+ MESSAGE = "Search for corresponding architecture ports."
+
+ def RunStep(self):
+ self["full_revision_list"] = list(OrderedDict.fromkeys(
+ self._options.revisions))
+ port_revision_list = []
+ for revision in self["full_revision_list"]:
+      # Search for commits which match the "Port rXXX" pattern.
+ git_hashes = self.GitLog(reverse=True, format="%H",
+ grep="Port r%d" % int(revision),
+ branch="svn/bleeding_edge")
+ for git_hash in git_hashes.splitlines():
+ svn_revision = self.GitSVNFindSVNRev(git_hash, "svn/bleeding_edge")
+ if not svn_revision: # pragma: no cover
+ self.Die("Cannot determine svn revision for %s" % git_hash)
+ revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+
+ # Is this revision included in the original revision list?
+ if svn_revision in self["full_revision_list"]:
+ print("Found port of r%s -> r%s (already included): %s"
+ % (revision, svn_revision, revision_title))
+ else:
+ print("Found port of r%s -> r%s: %s"
+ % (revision, svn_revision, revision_title))
+ port_revision_list.append(svn_revision)
+
+    # Did we find any ports?
+ if len(port_revision_list) > 0:
+ if self.Confirm("Automatically add corresponding ports (%s)?"
+ % ", ".join(port_revision_list)):
+ #: 'y': Add ports to revision list.
+ self["full_revision_list"].extend(port_revision_list)
+
+
+class FindGitRevisions(Step):
+ MESSAGE = "Find the git revisions associated with the patches."
+
+ def RunStep(self):
+ self["patch_commit_hashes"] = []
+ for revision in self["full_revision_list"]:
+ next_hash = self.GitSVNFindGitHash(revision, "svn/bleeding_edge")
+ if not next_hash: # pragma: no cover
+ self.Die("Cannot determine git hash for r%s" % revision)
+ self["patch_commit_hashes"].append(next_hash)
+
+ # Stringify: [123, 234] -> "r123, r234"
+ self["revision_list"] = ", ".join(map(lambda s: "r%s" % s,
+ self["full_revision_list"]))
+
+ if not self["revision_list"]: # pragma: no cover
+ self.Die("Revision list is empty.")
+
+ if self._options.revert:
+ if not self._options.revert_bleeding_edge:
+ self["new_commit_msg"] = ("Rollback of %s in %s branch."
+ % (self["revision_list"], self["merge_to_branch"]))
+ else:
+ self["new_commit_msg"] = "Revert %s." % self["revision_list"]
+ else:
+ self["new_commit_msg"] = ("Merged %s into %s branch."
+ % (self["revision_list"], self["merge_to_branch"]))
+ self["new_commit_msg"] += "\n\n"
+
+ for commit_hash in self["patch_commit_hashes"]:
+ patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
+ self["new_commit_msg"] += "%s\n\n" % patch_merge_desc
+
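+    # Collect the BUG= lines of all merged commits and aggregate them into a
+    # single BUG= line for the new commit message.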
+ bugs = []
+ for commit_hash in self["patch_commit_hashes"]:
+ msg = self.GitLog(n=1, git_hash=commit_hash)
+ for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg,
+ re.M):
+ bugs.extend(map(lambda s: s.strip(), bug.split(",")))
+ bug_aggregate = ",".join(sorted(bugs))
+ if bug_aggregate:
+ self["new_commit_msg"] += "BUG=%s\nLOG=N\n" % bug_aggregate
+ TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE))
+
+
+class ApplyPatches(Step):
+ MESSAGE = "Apply patches for selected revisions."
+
+ def RunStep(self):
+ for commit_hash in self["patch_commit_hashes"]:
+ print("Applying patch for %s to %s..."
+ % (commit_hash, self["merge_to_branch"]))
+ patch = self.GitGetPatch(commit_hash)
+ TextToFile(patch, self.Config(TEMPORARY_PATCH_FILE))
+ self.ApplyPatch(self.Config(TEMPORARY_PATCH_FILE), self._options.revert)
+ if self._options.patch:
+ self.ApplyPatch(self._options.patch, self._options.revert)
+
+
+class PrepareVersion(Step):
+ MESSAGE = "Prepare version file."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ # These version numbers are used again for creating the tag
+ self.ReadAndPersistVersion()
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ new_patch = str(int(self["patch"]) + 1)
+ if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % self.Config(VERSION_FILE)):
+ text = FileToText(self.Config(VERSION_FILE))
+ text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % new_patch,
+ text)
+ TextToFile(text, self.Config(VERSION_FILE))
+ else:
+ self.Editor(self.Config(VERSION_FILE))
+ self.ReadAndPersistVersion("new_")
+
+
+class CommitLocal(Step):
+ MESSAGE = "Commit to local branch."
+
+ def RunStep(self):
+ self.GitCommit(file_name=self.Config(COMMITMSG_FILE))
+
+
+class CommitRepository(Step):
+ MESSAGE = "Commit to the repository."
+
+ def RunStep(self):
+ self.GitCheckout(self.Config(BRANCHNAME))
+ self.WaitForLGTM()
+ self.GitPresubmit()
+ self.GitDCommit()
+
+
+class PrepareSVN(Step):
+ MESSAGE = "Determine svn commit revision."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ self.GitSVNFetch()
+ commit_hash = self.GitLog(n=1, format="%H", grep=self["new_commit_msg"],
+ branch="svn/%s" % self["merge_to_branch"])
+ if not commit_hash: # pragma: no cover
+ self.Die("Unable to map git commit to svn revision.")
+ self["svn_revision"] = self.GitSVNFindSVNRev(commit_hash)
+ print "subversion revision number is r%s" % self["svn_revision"]
+
+
+class TagRevision(Step):
+ MESSAGE = "Create the tag."
+
+ def RunStep(self):
+ if self._options.revert_bleeding_edge:
+ return
+ self["version"] = "%s.%s.%s.%s" % (self["new_major"],
+ self["new_minor"],
+ self["new_build"],
+ self["new_patch"])
+ print "Creating tag svn/tags/%s" % self["version"]
+ if self["merge_to_branch"] == "trunk":
+ self["to_url"] = "trunk"
+ else:
+ self["to_url"] = "branches/%s" % self["merge_to_branch"]
+ self.SVN("copy -r %s https://v8.googlecode.com/svn/%s "
+ "https://v8.googlecode.com/svn/tags/%s -m "
+ "\"Tagging version %s\""
+ % (self["svn_revision"], self["to_url"],
+ self["version"], self["version"]))
+
+
+class CleanUp(Step):
+ MESSAGE = "Cleanup."
+
+ def RunStep(self):
+ self.CommonCleanup()
+ if not self._options.revert_bleeding_edge:
+ print "*** SUMMARY ***"
+ print "version: %s" % self["version"]
+ print "branch: %s" % self["to_url"]
+ print "svn revision: %s" % self["svn_revision"]
+ if self["revision_list"]:
+ print "patches: %s" % self["revision_list"]
+
+
+class MergeToBranch(ScriptsBase):
+ def _Description(self):
+ return ("Performs the necessary steps to merge revisions from "
+ "bleeding_edge to other branches, including trunk.")
+
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument("--branch", help="The branch to merge to.")
+ group.add_argument("-R", "--revert-bleeding-edge",
+ help="Revert specified patches from bleeding edge.",
+ default=False, action="store_true")
+ parser.add_argument("revisions", nargs="*",
+ help="The revisions to merge.")
+ parser.add_argument("-f", "--force",
+ help="Delete sentinel file.",
+ default=False, action="store_true")
+ parser.add_argument("-m", "--message",
+ help="A commit message for the patch.")
+ parser.add_argument("--revert",
+ help="Revert specified patches.",
+ default=False, action="store_true")
+ parser.add_argument("-p", "--patch",
+ help="A patch file to apply as part of the merge.")
+
+ def _ProcessOptions(self, options):
+ # TODO(machenbach): Add a test that covers revert from bleeding_edge
+ if len(options.revisions) < 1:
+ if not options.patch:
+ print "Either a patch file or revision numbers must be specified"
+ return False
+ if not options.message:
+ print "You must specify a merge comment if no patches are specified"
+ return False
+ return True
+
+ def _Steps(self):
+ return [
+ Preparation,
+ CreateBranch,
+ SearchArchitecturePorts,
+ FindGitRevisions,
+ ApplyPatches,
+ PrepareVersion,
+ IncrementVersion,
+ CommitLocal,
+ UploadStep,
+ CommitRepository,
+ PrepareSVN,
+ TagRevision,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(MergeToBranch(CONFIG).Run())
diff --git a/deps/v8/tools/push-to-trunk/push_to_trunk.py b/deps/v8/tools/push-to-trunk/push_to_trunk.py
index 9c30570f5..b487b0f8f 100755
--- a/deps/v8/tools/push-to-trunk/push_to_trunk.py
+++ b/deps/v8/tools/push-to-trunk/push_to_trunk.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import optparse
+import argparse
import sys
import tempfile
import urllib2
@@ -34,8 +34,6 @@ import urllib2
from common_includes import *
TRUNKBRANCH = "TRUNKBRANCH"
-CHROMIUM = "CHROMIUM"
-DEPS_FILE = "DEPS_FILE"
CONFIG = {
BRANCHNAME: "prepare-push",
@@ -48,33 +46,11 @@ CONFIG = {
CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
- DEPS_FILE: "DEPS",
}
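+# Every trunk push commit message ends with this suffix; the regular
+# expression below extracts the bleeding_edge revision from such a message.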
+PUSH_MESSAGE_SUFFIX = " (based on bleeding_edge revision r%d)"
+PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
-class PushToTrunkOptions(CommonOptions):
- @staticmethod
- def MakeForcedOptions(reviewer, chrome_path):
- """Convenience wrapper."""
- class Options(object):
- pass
- options = Options()
- options.s = 0
- options.l = None
- options.f = True
- options.m = False
- options.r = reviewer
- options.c = chrome_path
- return PushToTrunkOptions(options)
-
- def __init__(self, options):
- super(PushToTrunkOptions, self).__init__(options, options.m)
- self.requires_editor = not options.f
- self.wait_for_lgtm = not options.f
- self.tbr_commit = not options.m
- self.l = options.l
- self.r = options.r
- self.c = options.c
class Preparation(Step):
MESSAGE = "Preparation."
@@ -90,26 +66,86 @@ class FreshBranch(Step):
MESSAGE = "Create a fresh branch."
def RunStep(self):
- args = "checkout -b %s svn/bleeding_edge" % self.Config(BRANCHNAME)
- if self.Git(args) is None:
- self.Die("Creating branch %s failed." % self.Config(BRANCHNAME))
+ self.GitCreateBranch(self.Config(BRANCHNAME), "svn/bleeding_edge")
+
+
+class PreparePushRevision(Step):
+ MESSAGE = "Check which revision to push."
+
+ def RunStep(self):
+ if self._options.revision:
+ self["push_hash"] = self.GitSVNFindGitHash(self._options.revision)
+ else:
+ self["push_hash"] = self.GitLog(n=1, format="%H", git_hash="HEAD")
+ if not self["push_hash"]: # pragma: no cover
+ self.Die("Could not determine the git hash for the push.")
class DetectLastPush(Step):
MESSAGE = "Detect commit ID of last push to trunk."
def RunStep(self):
- last_push = (self._options.l or
- self.Git("log -1 --format=%H ChangeLog").strip())
+ last_push = self._options.last_push or self.FindLastTrunkPush()
while True:
# Print assumed commit, circumventing git's pager.
- print self.Git("log -1 %s" % last_push)
+ print self.GitLog(n=1, git_hash=last_push)
if self.Confirm("Is the commit printed above the last push to trunk?"):
break
- args = "log -1 --format=%H %s^ ChangeLog" % last_push
- last_push = self.Git(args).strip()
- self.Persist("last_push", last_push)
- self._state["last_push"] = last_push
+ last_push = self.FindLastTrunkPush(parent_hash=last_push)
+
+ if self._options.last_bleeding_edge:
+ # Read the bleeding edge revision of the last push from a command-line
+ # option.
+ last_push_bleeding_edge = self._options.last_bleeding_edge
+ else:
+ # Retrieve the bleeding edge revision of the last push from the text in
+ # the push commit message.
+ last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
+      match = PUSH_MESSAGE_RE.match(last_push_title)
+      if not match: # pragma: no cover
+        self.Die("Could not retrieve bleeding edge revision for trunk push %s"
+                 % last_push)
+      last_push_be_svn = match.group(1)
+      last_push_bleeding_edge = self.GitSVNFindGitHash(last_push_be_svn)
+ if not last_push_bleeding_edge: # pragma: no cover
+ self.Die("Could not retrieve bleeding edge git hash for trunk push %s"
+ % last_push)
+
+ # This points to the svn revision of the last push on trunk.
+ self["last_push_trunk"] = last_push
+ # This points to the last bleeding_edge revision that went into the last
+ # push.
+ # TODO(machenbach): Do we need a check to make sure we're not pushing a
+ # revision older than the last push? If we do this, the output of the
+ # current change log preparation won't make much sense.
+ self["last_push_bleeding_edge"] = last_push_bleeding_edge
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ # Retrieve current version from last trunk push.
+ self.GitCheckoutFile(self.Config(VERSION_FILE), self["last_push_trunk"])
+ self.ReadAndPersistVersion()
+
+ if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % self.Config(VERSION_FILE))):
+ text = FileToText(self.Config(VERSION_FILE))
+ text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % str(int(self["build"]) + 1),
+ text)
+ TextToFile(text, self.Config(VERSION_FILE))
+ else:
+ self.Editor(self.Config(VERSION_FILE))
+
+ # Variables prefixed with 'new_' contain the new version numbers for the
+ # ongoing trunk push.
+ self.ReadAndPersistVersion("new_")
+ self["version"] = "%s.%s.%s" % (self["new_major"],
+ self["new_minor"],
+ self["new_build"])
class PrepareChangeLog(Step):
@@ -123,38 +159,30 @@ class PrepareChangeLog(Step):
match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
body, flags=re.M)
if match:
- cl_url = "https://codereview.chromium.org/%s/description" % match.group(1)
+ cl_url = ("https://codereview.chromium.org/%s/description"
+ % match.group(1))
try:
# Fetch from Rietveld but only retry once with one second delay since
# there might be many revisions.
body = self.ReadURL(cl_url, wait_plan=[1])
- except urllib2.URLError:
+ except urllib2.URLError: # pragma: no cover
pass
return body
def RunStep(self):
- self.RestoreIfUnset("last_push")
-
- # These version numbers are used again later for the trunk commit.
- self.ReadAndPersistVersion()
-
- date = self.GetDate()
- self.Persist("date", date)
- output = "%s: Version %s.%s.%s\n\n" % (date,
- self._state["major"],
- self._state["minor"],
- self._state["build"])
+ self["date"] = self.GetDate()
+ output = "%s: Version %s\n\n" % (self["date"], self["version"])
TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
-
- args = "log %s..HEAD --format=%%H" % self._state["last_push"]
- commits = self.Git(args).strip()
+ commits = self.GitLog(format="%H",
+ git_hash="%s..%s" % (self["last_push_bleeding_edge"],
+ self["push_hash"]))
# Cache raw commit messages.
commit_messages = [
[
- self.Git("log -1 %s --format=\"%%s\"" % commit),
- self.Reload(self.Git("log -1 %s --format=\"%%B\"" % commit)),
- self.Git("log -1 %s --format=\"%%an\"" % commit),
+ self.GitLog(n=1, format="%s", git_hash=commit),
+ self.Reload(self.GitLog(n=1, format="%B", git_hash=commit)),
+ self.GitLog(n=1, format="%an", git_hash=commit),
] for commit in commits.splitlines()
]
@@ -182,8 +210,6 @@ class EditChangeLog(Step):
"save the file and exit your EDITOR. ")
self.ReadLine(default="")
self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
- handle, new_changelog = tempfile.mkstemp()
- os.close(handle)
# Strip comments and reformat with correct indentation.
changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
@@ -191,74 +217,11 @@ class EditChangeLog(Step):
changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
changelog_entry = changelog_entry.lstrip()
- if changelog_entry == "":
+ if changelog_entry == "": # pragma: no cover
self.Die("Empty ChangeLog entry.")
- with open(new_changelog, "w") as f:
- f.write(changelog_entry)
- f.write("\n\n\n") # Explicitly insert two empty lines.
-
- AppendToFile(FileToText(self.Config(CHANGELOG_FILE)), new_changelog)
- TextToFile(FileToText(new_changelog), self.Config(CHANGELOG_FILE))
- os.remove(new_changelog)
-
-
-class IncrementVersion(Step):
- MESSAGE = "Increment version number."
-
- def RunStep(self):
- self.RestoreIfUnset("build")
- new_build = str(int(self._state["build"]) + 1)
-
- if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
- "fire up your EDITOR on %s so you can make arbitrary "
- "changes. When you're done, save the file and exit your "
- "EDITOR.)" % self.Config(VERSION_FILE))):
- text = FileToText(self.Config(VERSION_FILE))
- text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
- r"\g<space>%s" % new_build,
- text)
- TextToFile(text, self.Config(VERSION_FILE))
- else:
- self.Editor(self.Config(VERSION_FILE))
-
- self.ReadAndPersistVersion("new_")
-
-
-class CommitLocal(Step):
- MESSAGE = "Commit to local branch."
-
- def RunStep(self):
- self.RestoreVersionIfUnset("new_")
- prep_commit_msg = ("Prepare push to trunk. "
- "Now working on version %s.%s.%s." % (self._state["new_major"],
- self._state["new_minor"],
- self._state["new_build"]))
- self.Persist("prep_commit_msg", prep_commit_msg)
-
- # Include optional TBR only in the git command. The persisted commit
- # message is used for finding the commit again later.
- review = "\n\nTBR=%s" % self._options.r if self._options.tbr_commit else ""
- if self.Git("commit -a -m \"%s%s\"" % (prep_commit_msg, review)) is None:
- self.Die("'git commit -a' failed.")
-
-
-class CommitRepository(Step):
- MESSAGE = "Commit to the repository."
-
- def RunStep(self):
- self.WaitForLGTM()
- # Re-read the ChangeLog entry (to pick up possible changes).
- # FIXME(machenbach): This was hanging once with a broken pipe.
- TextToFile(GetLastChangeLogEntries(self.Config(CHANGELOG_FILE)),
- self.Config(CHANGELOG_ENTRY_FILE))
-
- if self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"") is None:
- self.Die("'git cl presubmit' failed, please try again.")
-
- if self.Git("cl dcommit -f --bypass-hooks",
- retry_on=lambda x: x is None) is None:
- self.Die("'git cl dcommit' failed, please try again.")
+    # Save the new change log entry for adding it later to the trunk patch.
+ TextToFile(changelog_entry, self.Config(CHANGELOG_ENTRY_FILE))
class StragglerCommits(Step):
@@ -266,13 +229,8 @@ class StragglerCommits(Step):
"started.")
def RunStep(self):
- if self.Git("svn fetch") is None:
- self.Die("'git svn fetch' failed.")
- self.Git("checkout svn/bleeding_edge")
- self.RestoreIfUnset("prep_commit_msg")
- args = "log -1 --format=%%H --grep=\"%s\"" % self._state["prep_commit_msg"]
- prepare_commit_hash = self.Git(args).strip()
- self.Persist("prepare_commit_hash", prepare_commit_hash)
+ self.GitSVNFetch()
+ self.GitCheckout("svn/bleeding_edge")
class SquashCommits(Step):
@@ -281,25 +239,20 @@ class SquashCommits(Step):
def RunStep(self):
# Instead of relying on "git rebase -i", we'll just create a diff, because
# that's easier to automate.
- self.RestoreIfUnset("prepare_commit_hash")
- args = "diff svn/trunk %s" % self._state["prepare_commit_hash"]
- TextToFile(self.Git(args), self.Config(PATCH_FILE))
+ TextToFile(self.GitDiff("svn/trunk", self["push_hash"]),
+ self.Config(PATCH_FILE))
# Convert the ChangeLog entry to commit message format.
- self.RestoreIfUnset("date")
text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
# Remove date and trailing white space.
- text = re.sub(r"^%s: " % self._state["date"], "", text.rstrip())
+ text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
# Retrieve svn revision for showing the used bleeding edge revision in the
# commit message.
- args = "svn find-rev %s" % self._state["prepare_commit_hash"]
- svn_revision = self.Git(args).strip()
- self.Persist("svn_revision", svn_revision)
- text = MSub(r"^(Version \d+\.\d+\.\d+)$",
- "\\1 (based on bleeding_edge revision r%s)" % svn_revision,
- text)
+ self["svn_revision"] = self.GitSVNFindSVNRev(self["push_hash"])
+ suffix = PUSH_MESSAGE_SUFFIX % int(self["svn_revision"])
+ text = MSub(r"^(Version \d+\.\d+\.\d+)$", "\\1%s" % suffix, text)
# Remove indentation and merge paragraphs into single long lines, keeping
# empty lines between them.
@@ -308,19 +261,16 @@ class SquashCommits(Step):
strip = lambda line: line.strip()
text = SplitMapJoin("\n\n", SplitMapJoin("\n", strip, " "), "\n\n")(text)
- if not text:
+ if not text: # pragma: no cover
self.Die("Commit message editing failed.")
TextToFile(text, self.Config(COMMITMSG_FILE))
- os.remove(self.Config(CHANGELOG_ENTRY_FILE))
class NewBranch(Step):
MESSAGE = "Create a new branch from trunk."
def RunStep(self):
- if self.Git("checkout -b %s svn/trunk" % self.Config(TRUNKBRANCH)) is None:
- self.Die("Checking out a new branch '%s' failed." %
- self.Config(TRUNKBRANCH))
+ self.GitCreateBranch(self.Config(TRUNKBRANCH), "svn/trunk")
class ApplyChanges(Step):
@@ -331,19 +281,36 @@ class ApplyChanges(Step):
Command("rm", "-f %s*" % self.Config(PATCH_FILE))
+class AddChangeLog(Step):
+ MESSAGE = "Add ChangeLog changes to trunk branch."
+
+ def RunStep(self):
+ # The change log has been modified by the patch. Reset it to the version
+ # on trunk and apply the exact changes determined by this PrepareChangeLog
+ # step above.
+ self.GitCheckoutFile(self.Config(CHANGELOG_FILE), "svn/trunk")
+ changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+ old_change_log = FileToText(self.Config(CHANGELOG_FILE))
+ new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
+ TextToFile(new_change_log, self.Config(CHANGELOG_FILE))
+ os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+
+
class SetVersion(Step):
MESSAGE = "Set correct version for trunk."
def RunStep(self):
- self.RestoreVersionIfUnset()
+ # The version file has been modified by the patch. Reset it to the version
+ # on trunk and apply the correct version.
+ self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/trunk")
output = ""
for line in FileToText(self.Config(VERSION_FILE)).splitlines():
if line.startswith("#define MAJOR_VERSION"):
- line = re.sub("\d+$", self._state["major"], line)
+ line = re.sub("\d+$", self["new_major"], line)
elif line.startswith("#define MINOR_VERSION"):
- line = re.sub("\d+$", self._state["minor"], line)
+ line = re.sub("\d+$", self["new_minor"], line)
elif line.startswith("#define BUILD_NUMBER"):
- line = re.sub("\d+$", self._state["build"], line)
+ line = re.sub("\d+$", self["new_build"], line)
elif line.startswith("#define PATCH_LEVEL"):
line = re.sub("\d+$", "0", line)
elif line.startswith("#define IS_CANDIDATE_VERSION"):
@@ -356,9 +323,7 @@ class CommitTrunk(Step):
MESSAGE = "Commit to local trunk branch."
def RunStep(self):
- self.Git("add \"%s\"" % self.Config(VERSION_FILE))
- if self.Git("commit -F \"%s\"" % self.Config(COMMITMSG_FILE)) is None:
- self.Die("'git commit' failed.")
+    self.GitCommit(file_name=self.Config(COMMITMSG_FILE))
Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
@@ -366,251 +331,115 @@ class SanityCheck(Step):
MESSAGE = "Sanity check."
def RunStep(self):
+ # TODO(machenbach): Run presubmit script here as it is now missing in the
+ # prepare push process.
if not self.Confirm("Please check if your local checkout is sane: Inspect "
"%s, compile, run tests. Do you want to commit this new trunk "
"revision to the repository?" % self.Config(VERSION_FILE)):
- self.Die("Execution canceled.")
+ self.Die("Execution canceled.") # pragma: no cover
class CommitSVN(Step):
MESSAGE = "Commit to SVN."
def RunStep(self):
- result = self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
- if not result:
+ result = self.GitSVNDCommit()
+ if not result: # pragma: no cover
self.Die("'git svn dcommit' failed.")
result = filter(lambda x: re.search(r"^Committed r[0-9]+", x),
result.splitlines())
if len(result) > 0:
- trunk_revision = re.sub(r"^Committed r([0-9]+)", r"\1", result[0])
+ self["trunk_revision"] = re.sub(r"^Committed r([0-9]+)", r"\1",result[0])
# Sometimes grepping for the revision fails. No idea why. If you figure
# out why it is flaky, please do fix it properly.
- if not trunk_revision:
+ if not self["trunk_revision"]:
print("Sorry, grepping for the SVN revision failed. Please look for it "
"in the last command's output above and provide it manually (just "
"the number, without the leading \"r\").")
self.DieNoManualMode("Can't prompt in forced mode.")
- while not trunk_revision:
+ while not self["trunk_revision"]:
print "> ",
- trunk_revision = self.ReadLine()
- self.Persist("trunk_revision", trunk_revision)
+ self["trunk_revision"] = self.ReadLine()
class TagRevision(Step):
MESSAGE = "Tag the new revision."
def RunStep(self):
- self.RestoreVersionIfUnset()
- ver = "%s.%s.%s" % (self._state["major"],
- self._state["minor"],
- self._state["build"])
- if self.Git("svn tag %s -m \"Tagging version %s\"" % (ver, ver),
- retry_on=lambda x: x is None) is None:
- self.Die("'git svn tag' failed.")
-
-
-class CheckChromium(Step):
- MESSAGE = "Ask for chromium checkout."
-
- def Run(self):
- chrome_path = self._options.c
- if not chrome_path:
- self.DieNoManualMode("Please specify the path to a Chromium checkout in "
- "forced mode.")
- print ("Do you have a \"NewGit\" Chromium checkout and want "
- "this script to automate creation of the roll CL? If yes, enter the "
- "path to (and including) the \"src\" directory here, otherwise just "
- "press <Return>: "),
- chrome_path = self.ReadLine()
- self.Persist("chrome_path", chrome_path)
-
-
-class SwitchChromium(Step):
- MESSAGE = "Switch to Chromium checkout."
- REQUIRES = "chrome_path"
-
- def RunStep(self):
- v8_path = os.getcwd()
- self.Persist("v8_path", v8_path)
- os.chdir(self._state["chrome_path"])
- self.InitialEnvironmentChecks()
- # Check for a clean workdir.
- if self.Git("status -s -uno").strip() != "":
- self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Assert that the DEPS file is there.
- if not os.path.exists(self.Config(DEPS_FILE)):
- self.Die("DEPS file not present.")
-
-
-class UpdateChromiumCheckout(Step):
- MESSAGE = "Update the checkout and create a new branch."
- REQUIRES = "chrome_path"
-
- def RunStep(self):
- os.chdir(self._state["chrome_path"])
- if self.Git("checkout master") is None:
- self.Die("'git checkout master' failed.")
- if self.Git("pull") is None:
- self.Die("'git pull' failed, please try again.")
-
- self.RestoreIfUnset("trunk_revision")
- args = "checkout -b v8-roll-%s" % self._state["trunk_revision"]
- if self.Git(args) is None:
- self.Die("Failed to checkout a new branch.")
-
-
-class UploadCL(Step):
- MESSAGE = "Create and upload CL."
- REQUIRES = "chrome_path"
-
- def RunStep(self):
- os.chdir(self._state["chrome_path"])
-
- # Patch DEPS file.
- self.RestoreIfUnset("trunk_revision")
- deps = FileToText(self.Config(DEPS_FILE))
- deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
- self._state["trunk_revision"],
- deps)
- TextToFile(deps, self.Config(DEPS_FILE))
-
- self.RestoreVersionIfUnset()
- ver = "%s.%s.%s" % (self._state["major"],
- self._state["minor"],
- self._state["build"])
- if self._options.r:
- print "Using account %s for review." % self._options.r
- rev = self._options.r
- else:
- print "Please enter the email address of a reviewer for the roll CL: ",
- self.DieNoManualMode("A reviewer must be specified in forced mode.")
- rev = self.ReadLine()
- self.RestoreIfUnset("svn_revision")
- args = ("commit -am \"Update V8 to version %s "
- "(based on bleeding_edge revision r%s).\n\nTBR=%s\""
- % (ver, self._state["svn_revision"], rev))
- if self.Git(args) is None:
- self.Die("'git commit' failed.")
- force_flag = " -f" if self._options.force_upload else ""
- if self.Git("cl upload --send-mail%s" % force_flag, pipe=False) is None:
- self.Die("'git cl upload' failed, please try again.")
- print "CL uploaded."
-
-
-class SwitchV8(Step):
- MESSAGE = "Returning to V8 checkout."
- REQUIRES = "chrome_path"
-
- def RunStep(self):
- self.RestoreIfUnset("v8_path")
- os.chdir(self._state["v8_path"])
+ self.GitSVNTag(self["version"])
class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
- self.RestoreVersionIfUnset()
- ver = "%s.%s.%s" % (self._state["major"],
- self._state["minor"],
- self._state["build"])
- self.RestoreIfUnset("trunk_revision")
- self.RestoreIfUnset("chrome_path")
-
- if self._state["chrome_path"]:
- print("Congratulations, you have successfully created the trunk "
- "revision %s and rolled it into Chromium. Please don't forget to "
- "update the v8rel spreadsheet:" % ver)
- else:
- print("Congratulations, you have successfully created the trunk "
- "revision %s. Please don't forget to roll this new version into "
- "Chromium, and to update the v8rel spreadsheet:" % ver)
- print "%s\ttrunk\t%s" % (ver, self._state["trunk_revision"])
+ print("Congratulations, you have successfully created the trunk "
+ "revision %s. Please don't forget to roll this new version into "
+ "Chromium, and to update the v8rel spreadsheet:"
+ % self["version"])
+ print "%s\ttrunk\t%s" % (self["version"], self["trunk_revision"])
self.CommonCleanup()
- if self.Config(TRUNKBRANCH) != self._state["current_branch"]:
- self.Git("branch -D %s" % self.Config(TRUNKBRANCH))
-
-
-def RunPushToTrunk(config,
- options,
- side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
- step_classes = [
- Preparation,
- FreshBranch,
- DetectLastPush,
- PrepareChangeLog,
- EditChangeLog,
- IncrementVersion,
- CommitLocal,
- UploadStep,
- CommitRepository,
- StragglerCommits,
- SquashCommits,
- NewBranch,
- ApplyChanges,
- SetVersion,
- CommitTrunk,
- SanityCheck,
- CommitSVN,
- TagRevision,
- CheckChromium,
- SwitchChromium,
- UpdateChromiumCheckout,
- UploadCL,
- SwitchV8,
- CleanUp,
- ]
-
- RunScript(step_classes, config, options, side_effect_handler)
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("-c", "--chromium", dest="c",
- help=("Specify the path to your Chromium src/ "
- "directory to automate the V8 roll."))
- result.add_option("-f", "--force", dest="f",
- help="Don't prompt the user.",
- default=False, action="store_true")
- result.add_option("-l", "--last-push", dest="l",
- help=("Manually specify the git commit ID "
- "of the last push to trunk."))
- result.add_option("-m", "--manual", dest="m",
- help="Prompt the user at every important step.",
- default=False, action="store_true")
- result.add_option("-r", "--reviewer", dest="r",
- help=("Specify the account name to be used for reviews."))
- result.add_option("-s", "--step", dest="s",
- help="Specify the step where to start work. Default: 0.",
- default=0, type="int")
- return result
-
-
-def ProcessOptions(options):
- if options.s < 0:
- print "Bad step number %d" % options.s
- return False
- if not options.m and not options.r:
- print "A reviewer (-r) is required in (semi-)automatic mode."
- return False
- if options.f and options.m:
- print "Manual and forced mode cannot be combined."
- return False
- if not options.m and not options.c:
- print "A chromium checkout (-c) is required in (semi-)automatic mode."
- return False
- return True
-
-
-def Main():
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- if not ProcessOptions(options):
- parser.print_help()
- return 1
- RunPushToTrunk(CONFIG, PushToTrunkOptions(options))
-
-if __name__ == "__main__":
- sys.exit(Main())
+ if self.Config(TRUNKBRANCH) != self["current_branch"]:
+ self.GitDeleteBranch(self.Config(TRUNKBRANCH))
+
+
+class PushToTrunk(ScriptsBase):
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-f", "--force",
+ help="Don't prompt the user.",
+ default=False, action="store_true")
+ group.add_argument("-m", "--manual",
+ help="Prompt the user at every important step.",
+ default=False, action="store_true")
+ parser.add_argument("-b", "--last-bleeding-edge",
+ help=("The git commit ID of the last bleeding edge "
+ "revision that was pushed to trunk. This is "
+ "used for the auto-generated ChangeLog entry."))
+ parser.add_argument("-l", "--last-push",
+ help="The git commit ID of the last push to trunk.")
+ parser.add_argument("-R", "--revision",
+ help="The svn revision to push (defaults to HEAD).")
+
+ def _ProcessOptions(self, options): # pragma: no cover
+ if not options.manual and not options.reviewer:
+ print "A reviewer (-r) is required in (semi-)automatic mode."
+ return False
+ if not options.manual and not options.author:
+ print "Specify your chromium.org email with -a in (semi-)automatic mode."
+ return False
+ if options.revision and not int(options.revision) > 0:
+ print("The --revision flag must be a positiv integer pointing to a "
+ "valid svn revision.")
+ return False
+
+ options.tbr_commit = not options.manual
+ return True
+
+ def _Steps(self):
+ return [
+ Preparation,
+ FreshBranch,
+ PreparePushRevision,
+ DetectLastPush,
+ IncrementVersion,
+ PrepareChangeLog,
+ EditChangeLog,
+ StragglerCommits,
+ SquashCommits,
+ NewBranch,
+ ApplyChanges,
+ AddChangeLog,
+ SetVersion,
+ CommitTrunk,
+ SanityCheck,
+ CommitSVN,
+ TagRevision,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(PushToTrunk(CONFIG).Run())
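A minimal sketch of how the restructured script is driven, assuming the module-level CONFIG dictionary and the argparse flags defined above; the author and reviewer addresses are placeholders, not real accounts:

from push_to_trunk import CONFIG, PushToTrunk

# Semi-automatic run: -a (author) and -r (reviewer) are required whenever
# -m (manual) is not given; --revision is optional and defaults to HEAD.
exit_code = PushToTrunk(CONFIG).Run([
    "-a", "author@chromium.org",
    "-r", "reviewer@chromium.org",
    "--revision", "123455",
])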
diff --git a/deps/v8/tools/push-to-trunk/script_test.py b/deps/v8/tools/push-to-trunk/script_test.py
new file mode 100755
index 000000000..cbb2134f6
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/script_test.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Wraps test execution with a coverage analysis. To get the best speed, the
+# native python coverage version >= 3.7.1 should be installed.
+
+import coverage
+import os
+import unittest
+import sys
+
+
+def Main(argv):
+ script_path = os.path.dirname(os.path.abspath(__file__))
+ cov = coverage.coverage(include=([os.path.join(script_path, '*.py')]))
+ cov.start()
+ import test_scripts
+ alltests = map(unittest.TestLoader().loadTestsFromTestCase, [
+ test_scripts.ToplevelTest,
+ test_scripts.ScriptTest,
+ test_scripts.SystemTest,
+ ])
+ unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
+ cov.stop()
+ print cov.report()
+
+
+if __name__ == '__main__':
+ sys.exit(Main(sys.argv))
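The wrapper above follows the usual coverage.py pattern: coverage has to be started before the modules under test are imported, so that their top-level definitions are recorded. A condensed sketch of that pattern, assuming the same classic coverage.py API the script itself uses:

import coverage
import unittest

cov = coverage.coverage(include=["*.py"])  # limit the report to the local scripts
cov.start()                                # start *before* importing test_scripts
import test_scripts                        # imported under coverage measurement
suite = unittest.TestLoader().loadTestsFromTestCase(test_scripts.ScriptTest)
unittest.TextTestRunner(verbosity=2).run(suite)
cov.stop()
print cov.report()                         # writes the per-file table, prints the total percentage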
diff --git a/deps/v8/tools/push-to-trunk/test_scripts.py b/deps/v8/tools/push-to-trunk/test_scripts.py
index 90f484911..9107db97e 100644
--- a/deps/v8/tools/push-to-trunk/test_scripts.py
+++ b/deps/v8/tools/push-to-trunk/test_scripts.py
@@ -31,15 +31,19 @@ import tempfile
import traceback
import unittest
+import auto_push
+from auto_push import CheckLastPush
+from auto_push import SETTINGS_LOCATION
import common_includes
from common_includes import *
+import merge_to_branch
+from merge_to_branch import *
import push_to_trunk
from push_to_trunk import *
-import auto_roll
-from auto_roll import AutoRollOptions
-from auto_roll import CheckLastPush
-from auto_roll import FetchLatestRevision
-from auto_roll import SETTINGS_LOCATION
+import chromium_roll
+from chromium_roll import CHROMIUM
+from chromium_roll import DEPS_FILE
+from chromium_roll import ChromiumRoll
TEST_CONFIG = {
@@ -56,23 +60,17 @@ TEST_CONFIG = {
CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
SETTINGS_LOCATION: None,
+ ALREADY_MERGING_SENTINEL_FILE:
+ "/tmp/test-merge-to-branch-tempfile-already-merging",
+ COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
+ TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
}
-def MakeOptions(s=0, l=None, f=False, m=True, r=None, c=None,
- status_password=None):
- """Convenience wrapper."""
- class Options(object):
- pass
- options = Options()
- options.s = s
- options.l = l
- options.f = f
- options.m = m
- options.r = r
- options.c = c
- options.status_password = status_password
- return options
+AUTO_PUSH_ARGS = [
+ "-a", "author@chromium.org",
+ "-r", "reviewer@chromium.org",
+]
class ToplevelTest(unittest.TestCase):
@@ -213,6 +211,31 @@ Committed: https://code.google.com/p/v8/source/detail?r=18210
"BUG=1234567890\n"))
+def Git(*args, **kwargs):
+ """Convenience function returning a git test expectation."""
+ return {
+ "name": "git",
+ "args": args[:-1],
+ "ret": args[-1],
+ "cb": kwargs.get("cb"),
+ }
+
+
+def RL(text, cb=None):
+ """Convenience function returning a readline test expectation."""
+ return {"name": "readline", "args": [], "ret": text, "cb": cb}
+
+
+def URL(*args, **kwargs):
+ """Convenience function returning a readurl test expectation."""
+ return {
+ "name": "readurl",
+ "args": args[:-1],
+ "ret": args[-1],
+ "cb": kwargs.get("cb"),
+ }
+
+
class SimpleMock(object):
def __init__(self, name):
self._name = name
@@ -222,45 +245,45 @@ class SimpleMock(object):
def Expect(self, recipe):
self._recipe = recipe
- def Call(self, *args):
+ def Call(self, name, *args): # pragma: no cover
self._index += 1
try:
expected_call = self._recipe[self._index]
except IndexError:
- raise Exception("Calling %s %s" % (self._name, " ".join(args)))
+ raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
+
+ if not isinstance(expected_call, dict):
+ raise NoRetryException("Found wrong expectation type for %s %s"
+ % (name, " ".join(args)))
- # Pack expectations without arguments into a list.
- if not isinstance(expected_call, list):
- expected_call = [expected_call]
# The number of arguments in the expectation must match the actual
# arguments.
- if len(args) > len(expected_call):
+ if len(args) > len(expected_call['args']):
raise NoRetryException("When calling %s with arguments, the "
"expectations must consist of at least as many arguments.")
# Compare expected and actual arguments.
- for (expected_arg, actual_arg) in zip(expected_call, args):
+ for (expected_arg, actual_arg) in zip(expected_call['args'], args):
if expected_arg != actual_arg:
raise NoRetryException("Expected: %s - Actual: %s"
% (expected_arg, actual_arg))
- # The expectation list contains a mandatory return value and an optional
- # callback for checking the context at the time of the call.
- if len(expected_call) == len(args) + 2:
+ # The expected call contains an optional callback for checking the context
+ # at the time of the call.
+ if expected_call['cb']:
try:
- expected_call[len(args) + 1]()
+ expected_call['cb']()
except:
tb = traceback.format_exc()
raise NoRetryException("Caught exception from callback: %s" % tb)
- return_value = expected_call[len(args)]
# If the return value is an exception, raise it instead of returning.
- if isinstance(return_value, Exception):
- raise return_value
- return return_value
+ if isinstance(expected_call['ret'], Exception):
+ raise expected_call['ret']
+ return expected_call['ret']
- def AssertFinished(self):
+ def AssertFinished(self): # pragma: no cover
if self._index < len(self._recipe) -1:
raise NoRetryException("Called %s too seldom: %d vs. %d"
% (self._name, self._index, len(self._recipe)))
@@ -273,35 +296,42 @@ class ScriptTest(unittest.TestCase):
self._tmp_files.append(name)
return name
- def MakeTempVersionFile(self):
- name = self.MakeEmptyTempFile()
- with open(name, "w") as f:
+ def WriteFakeVersionFile(self, build=4):
+ with open(TEST_CONFIG[VERSION_FILE], "w") as f:
f.write(" // Some line...\n")
f.write("\n")
f.write("#define MAJOR_VERSION 3\n")
f.write("#define MINOR_VERSION 22\n")
- f.write("#define BUILD_NUMBER 5\n")
+ f.write("#define BUILD_NUMBER %s\n" % build)
f.write("#define PATCH_LEVEL 0\n")
f.write(" // Some line...\n")
f.write("#define IS_CANDIDATE_VERSION 0\n")
- return name
- def MakeStep(self, step_class=Step, state=None, options=None):
+ def MakeStep(self):
"""Convenience wrapper."""
- options = options or CommonOptions(MakeOptions())
- return MakeStep(step_class=step_class, number=0, state=state,
- config=TEST_CONFIG, options=options,
- side_effect_handler=self)
+ options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
+ return MakeStep(step_class=Step, state=self._state,
+ config=TEST_CONFIG, side_effect_handler=self,
+ options=options)
+
+ def RunStep(self, script=PushToTrunk, step_class=Step, args=None):
+ """Convenience wrapper."""
+ args = args or ["-m"]
+ return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def GitMock(self, cmd, args="", pipe=True):
print "%s %s" % (cmd, args)
- return self._git_mock.Call(args)
+ return self._git_mock.Call("git", args)
def LogMock(self, cmd, args=""):
print "Log: %s %s" % (cmd, args)
MOCKS = {
"git": GitMock,
+ # TODO(machenbach): Little hack to reuse the git mock for the one svn call
+ # in merge-to-branch. The command should be made explicit in the test
+ # expectations.
+ "svn": GitMock,
"vi": LogMock,
}
@@ -312,13 +342,13 @@ class ScriptTest(unittest.TestCase):
return ScriptTest.MOCKS[cmd](self, cmd, args)
def ReadLine(self):
- return self._rl_mock.Call()
+ return self._rl_mock.Call("readline")
def ReadURL(self, url, params):
if params is not None:
- return self._url_mock.Call(url, params)
+ return self._url_mock.Call("readurl", url, params)
else:
- return self._url_mock.Call(url)
+ return self._url_mock.Call("readurl", url)
def Sleep(self, seconds):
pass
@@ -343,6 +373,7 @@ class ScriptTest(unittest.TestCase):
self._rl_mock = SimpleMock("readline")
self._url_mock = SimpleMock("readurl")
self._tmp_files = []
+ self._state = {}
def tearDown(self):
Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
@@ -356,59 +387,53 @@ class ScriptTest(unittest.TestCase):
self._rl_mock.AssertFinished()
self._url_mock.AssertFinished()
- def testPersistRestore(self):
- self.MakeStep().Persist("test1", "")
- self.assertEquals("", self.MakeStep().Restore("test1"))
- self.MakeStep().Persist("test2", "AB123")
- self.assertEquals("AB123", self.MakeStep().Restore("test2"))
-
def testGitOrig(self):
self.assertTrue(Command("git", "--version").startswith("git version"))
def testGitMock(self):
- self.ExpectGit([["--version", "git version 1.2.3"], ["dummy", ""]])
+ self.ExpectGit([Git("--version", "git version 1.2.3"), Git("dummy", "")])
self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
self.assertEquals("", self.MakeStep().Git("dummy"))
def testCommonPrepareDefault(self):
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch"],
- ["svn fetch", ""],
- ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
- ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
- ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
- ["branch", ""],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch"),
+ Git("svn fetch", ""),
+ Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]),
+ Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("branch", ""),
])
- self.ExpectReadline(["Y"])
+ self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
- self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+ self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch"],
- ["svn fetch", ""],
- ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch"),
+ Git("svn fetch", ""),
+ Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]),
])
- self.ExpectReadline(["n"])
+ self.ExpectReadline([RL("n")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+ self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch"],
- ["svn fetch", ""],
- ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
- ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch"),
+ Git("svn fetch", ""),
+ Git("branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]),
+ Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None),
])
- self.ExpectReadline(["Y"])
+ self.ExpectReadline([RL("Y")])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+ self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
@@ -416,17 +441,14 @@ class ScriptTest(unittest.TestCase):
self.MakeStep().InitialEnvironmentChecks()
def testReadAndPersistVersion(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile(build=5)
step = self.MakeStep()
step.ReadAndPersistVersion()
- self.assertEquals("3", self.MakeStep().Restore("major"))
- self.assertEquals("22", self.MakeStep().Restore("minor"))
- self.assertEquals("5", self.MakeStep().Restore("build"))
- self.assertEquals("0", self.MakeStep().Restore("patch"))
- self.assertEquals("3", step._state["major"])
- self.assertEquals("22", step._state["minor"])
- self.assertEquals("5", step._state["build"])
- self.assertEquals("0", step._state["patch"])
+ self.assertEquals("3", step["major"])
+ self.assertEquals("22", step["minor"])
+ self.assertEquals("5", step["build"])
+ self.assertEquals("0", step["patch"])
def testRegex(self):
self.assertEqual("(issue 321)",
@@ -449,36 +471,48 @@ class ScriptTest(unittest.TestCase):
r"\g<space>3",
"//\n#define BUILD_NUMBER 321\n"))
+ def testPreparePushRevision(self):
+ # Tests the default push hash used when the --revision option is not set.
+ self.ExpectGit([
+ Git("log -1 --format=%H HEAD", "push_hash")
+ ])
+
+ self.RunStep(PushToTrunk, PreparePushRevision)
+ self.assertEquals("push_hash", self._state["push_hash"])
+
def testPrepareChangeLog(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile()
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
self.ExpectGit([
- ["log 1234..HEAD --format=%H", "rev1\nrev2\nrev3\nrev4"],
- ["log -1 rev1 --format=\"%s\"", "Title text 1"],
- ["log -1 rev1 --format=\"%B\"", "Title\n\nBUG=\nLOG=y\n"],
- ["log -1 rev1 --format=\"%an\"", "author1@chromium.org"],
- ["log -1 rev2 --format=\"%s\"", "Title text 2."],
- ["log -1 rev2 --format=\"%B\"", "Title\n\nBUG=123\nLOG= \n"],
- ["log -1 rev2 --format=\"%an\"", "author2@chromium.org"],
- ["log -1 rev3 --format=\"%s\"", "Title text 3"],
- ["log -1 rev3 --format=\"%B\"", "Title\n\nBUG=321\nLOG=true\n"],
- ["log -1 rev3 --format=\"%an\"", "author3@chromium.org"],
- ["log -1 rev4 --format=\"%s\"", "Title text 4"],
- ["log -1 rev4 --format=\"%B\"",
+ Git("log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
+ Git("log -1 --format=%s rev1", "Title text 1"),
+ Git("log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
+ Git("log -1 --format=%an rev1", "author1@chromium.org"),
+ Git("log -1 --format=%s rev2", "Title text 2."),
+ Git("log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
+ Git("log -1 --format=%an rev2", "author2@chromium.org"),
+ Git("log -1 --format=%s rev3", "Title text 3"),
+ Git("log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
+ Git("log -1 --format=%an rev3", "author3@chromium.org"),
+ Git("log -1 --format=%s rev4", "Title text 4"),
+ Git("log -1 --format=%B rev4",
("Title\n\nBUG=456\nLOG=Y\n\n"
- "Review URL: https://codereview.chromium.org/9876543210\n")],
- ["log -1 rev4 --format=\"%an\"", "author4@chromium.org"],
+ "Review URL: https://codereview.chromium.org/9876543210\n")),
+ Git("log -1 --format=%an rev4", "author4@chromium.org"),
])
# The cl for rev4 on rietveld has an updated LOG flag.
self.ExpectReadURL([
- ["https://codereview.chromium.org/9876543210/description",
- "Title\n\nBUG=456\nLOG=N\n\n"],
+ URL("https://codereview.chromium.org/9876543210/description",
+ "Title\n\nBUG=456\nLOG=N\n\n"),
])
- self.MakeStep().Persist("last_push", "1234")
- self.MakeStep(PrepareChangeLog).Run()
+ self._state["last_push_bleeding_edge"] = "1234"
+ self._state["push_hash"] = "push_hash"
+ self._state["version"] = "3.22.5"
+ self.RunStep(PushToTrunk, PrepareChangeLog)
actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
@@ -509,57 +543,40 @@ class ScriptTest(unittest.TestCase):
#"""
self.assertEquals(expected_cl, actual_cl)
- self.assertEquals("3", self.MakeStep().Restore("major"))
- self.assertEquals("22", self.MakeStep().Restore("minor"))
- self.assertEquals("5", self.MakeStep().Restore("build"))
- self.assertEquals("0", self.MakeStep().Restore("patch"))
def testEditChangeLog(self):
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
- TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
- TextToFile(" Original CL", TEST_CONFIG[CHANGELOG_FILE])
TextToFile(" New \n\tLines \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
os.environ["EDITOR"] = "vi"
self.ExpectReadline([
- "", # Open editor.
+ RL(""), # Open editor.
])
- self.MakeStep(EditChangeLog).Run()
+ self.RunStep(PushToTrunk, EditChangeLog)
- self.assertEquals("New\n Lines\n\n\n Original CL",
- FileToText(TEST_CONFIG[CHANGELOG_FILE]))
+ self.assertEquals("New\n Lines",
+ FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE]))
def testIncrementVersion(self):
- TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
- self.MakeStep().Persist("build", "5")
+ TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile()
+ self._state["last_push_trunk"] = "hash1"
- self.ExpectReadline([
- "Y", # Increment build number.
+ self.ExpectGit([
+ Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], "")
])
- self.MakeStep(IncrementVersion).Run()
-
- self.assertEquals("3", self.MakeStep().Restore("new_major"))
- self.assertEquals("22", self.MakeStep().Restore("new_minor"))
- self.assertEquals("6", self.MakeStep().Restore("new_build"))
- self.assertEquals("0", self.MakeStep().Restore("new_patch"))
-
- def testLastChangeLogEntries(self):
- TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
- l = """
- Fixed something.
- (issue 1234)\n"""
- for _ in xrange(10): l = l + l
-
- cl_chunk = """2013-11-12: Version 3.23.2\n%s
- Performance and stability improvements on all platforms.\n\n\n""" % l
+ self.ExpectReadline([
+ RL("Y"), # Increment build number.
+ ])
- cl_chunk_full = cl_chunk + cl_chunk + cl_chunk
- TextToFile(cl_chunk_full, TEST_CONFIG[CHANGELOG_FILE])
+ self.RunStep(PushToTrunk, IncrementVersion)
- cl = GetLastChangeLogEntries(TEST_CONFIG[CHANGELOG_FILE])
- self.assertEquals(cl_chunk, cl)
+ self.assertEquals("3", self._state["new_major"])
+ self.assertEquals("22", self._state["new_minor"])
+ self.assertEquals("5", self._state["new_build"])
+ self.assertEquals("0", self._state["new_patch"])
def _TestSquashCommits(self, change_log, expected_msg):
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
@@ -567,14 +584,14 @@ class ScriptTest(unittest.TestCase):
f.write(change_log)
self.ExpectGit([
- ["diff svn/trunk hash1", "patch content"],
- ["svn find-rev hash1", "123455\n"],
+ Git("diff svn/trunk hash1", "patch content"),
+ Git("svn find-rev hash1", "123455\n"),
])
- self.MakeStep().Persist("prepare_commit_hash", "hash1")
- self.MakeStep().Persist("date", "1999-11-11")
+ self._state["push_hash"] = "hash1"
+ self._state["date"] = "1999-11-11"
- self.MakeStep(SquashCommits).Run()
+ self.RunStep(PushToTrunk, SquashCommits)
self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
patch = FileToText(TEST_CONFIG[ PATCH_FILE])
@@ -615,27 +632,29 @@ Performance and stability improvements on all platforms."""
def _PushToTrunk(self, force=False, manual=False):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
- TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+
+ # The version file on bleeding edge has build level 5, while the version
+ # file from trunk has build level 4.
+ TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile(build=5)
+
TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
- if not os.path.exists(TEST_CONFIG[CHROMIUM]):
- os.makedirs(TEST_CONFIG[CHROMIUM])
- TextToFile("1999-04-05: Version 3.22.4", TEST_CONFIG[CHANGELOG_FILE])
- TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
- TEST_CONFIG[DEPS_FILE])
+ bleeding_edge_change_log = "2014-03-17: Sentinel\n"
+ TextToFile(bleeding_edge_change_log, TEST_CONFIG[CHANGELOG_FILE])
os.environ["EDITOR"] = "vi"
- def CheckPreparePush():
- cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
- self.assertTrue(re.search(r"Version 3.22.5", cl))
- self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
- self.assertFalse(re.search(r" \(author1@chromium\.org\)", cl))
+ def ResetChangeLog():
+ """On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
+ the ChangLog will be reset to its content on trunk."""
+ trunk_change_log = """1999-04-05: Version 3.22.4
- # Make sure all comments got stripped.
- self.assertFalse(re.search(r"^#", cl, flags=re.M))
+ Performance and stability improvements on all platforms.\n"""
+ TextToFile(trunk_change_log, TEST_CONFIG[CHANGELOG_FILE])
- version = FileToText(TEST_CONFIG[VERSION_FILE])
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+6", version))
+ def ResetToTrunk():
+ ResetChangeLog()
+ self.WriteFakeVersionFile()
def CheckSVNCommit():
commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
@@ -652,89 +671,84 @@ Performance and stability improvements on all platforms.""", commit)
self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ # Check that the change log on the trunk branch got correctly modified.
+ change_log = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertEquals(
+"""1999-07-31: Version 3.22.5
+
+ Log text 1 (issue 321).
+
+ Performance and stability improvements on all platforms.
+
+
+1999-04-05: Version 3.22.4
+
+ Performance and stability improvements on all platforms.\n""",
+ change_log)
+
force_flag = " -f" if not manual else ""
- review_suffix = "\n\nTBR=reviewer@chromium.org" if not manual else ""
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch\n"],
- ["svn fetch", ""],
- ["branch", " branch1\n* branch2\n"],
- ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
- ["branch", " branch1\n* branch2\n"],
- ["branch", " branch1\n* branch2\n"],
- ["checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""],
- ["log -1 --format=%H ChangeLog", "1234\n"],
- ["log -1 1234", "Last push ouput\n"],
- ["log 1234..HEAD --format=%H", "rev1\n"],
- ["log -1 rev1 --format=\"%s\"", "Log text 1.\n"],
- ["log -1 rev1 --format=\"%B\"", "Text\nLOG=YES\nBUG=v8:321\nText\n"],
- ["log -1 rev1 --format=\"%an\"", "author1@chromium.org\n"],
- [("commit -a -m \"Prepare push to trunk. "
- "Now working on version 3.22.6.%s\"" % review_suffix),
- " 2 files changed\n",
- CheckPreparePush],
- ["cl upload -r \"reviewer@chromium.org\" --send-mail%s" % force_flag,
- "done\n"],
- ["cl presubmit", "Presubmit successfull\n"],
- ["cl dcommit -f --bypass-hooks", "Closing issue\n"],
- ["svn fetch", "fetch result\n"],
- ["checkout svn/bleeding_edge", ""],
- [("log -1 --format=%H --grep=\"Prepare push to trunk. "
- "Now working on version 3.22.6.\""),
- "hash1\n"],
- ["diff svn/trunk hash1", "patch content\n"],
- ["svn find-rev hash1", "123455\n"],
- ["checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], ""],
- ["apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""],
- ["add \"%s\"" % TEST_CONFIG[VERSION_FILE], ""],
- ["commit -F \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "", CheckSVNCommit],
- ["svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"],
- ["svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""],
- ["status -s -uno", ""],
- ["checkout master", ""],
- ["pull", ""],
- ["checkout -b v8-roll-123456", ""],
- [("commit -am \"Update V8 to version 3.22.5 "
- "(based on bleeding_edge revision r123455).\n\n"
- "TBR=reviewer@chromium.org\""),
- ""],
- ["cl upload --send-mail%s" % force_flag, ""],
- ["checkout -f some_branch", ""],
- ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
- ["branch -D %s" % TEST_CONFIG[BRANCHNAME], ""],
- ["branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
+ Git("branch", " branch1\n* branch2\n"),
+ Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("branch", " branch1\n* branch2\n"),
+ Git("branch", " branch1\n* branch2\n"),
+ Git("checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""),
+ Git("svn find-rev r123455", "push_hash\n"),
+ Git(("log -1 --format=%H --grep="
+ "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
+ "svn/trunk"), "hash2\n"),
+ Git("log -1 hash2", "Log message\n"),
+ Git("log -1 --format=%s hash2",
+ "Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
+ Git("svn find-rev r1234", "hash3\n"),
+ Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ cb=self.WriteFakeVersionFile),
+ Git("log --format=%H hash3..push_hash", "rev1\n"),
+ Git("log -1 --format=%s rev1", "Log text 1.\n"),
+ Git("log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
+ Git("log -1 --format=%an rev1", "author1@chromium.org\n"),
+ Git("svn fetch", "fetch result\n"),
+ Git("checkout -f svn/bleeding_edge", ""),
+ Git("diff svn/trunk push_hash", "patch content\n"),
+ Git("svn find-rev push_hash", "123455\n"),
+ Git("checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], "",
+ cb=ResetToTrunk),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""),
+ Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[CHANGELOG_FILE], "",
+ cb=ResetChangeLog),
+ Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[VERSION_FILE], "",
+ cb=self.WriteFakeVersionFile),
+ Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "",
+ cb=CheckSVNCommit),
+ Git("svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"),
+ Git("svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
+ Git("checkout -f some_branch", ""),
+ Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+ Git("branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""),
])
# Expected keyboard input in manual mode:
if manual:
self.ExpectReadline([
- "Y", # Confirm last push.
- "", # Open editor.
- "Y", # Increment build number.
- "reviewer@chromium.org", # V8 reviewer.
- "LGTX", # Enter LGTM for V8 CL (wrong).
- "LGTM", # Enter LGTM for V8 CL.
- "Y", # Sanity check.
- "reviewer@chromium.org", # Chromium reviewer.
- ])
-
- # Expected keyboard input in semi-automatic mode:
- if not manual and not force:
- self.ExpectReadline([
- "LGTM", # Enter LGTM for V8 CL.
+ RL("Y"), # Confirm last push.
+ RL(""), # Open editor.
+ RL("Y"), # Increment build number.
+ RL("Y"), # Sanity check.
])
- # No keyboard input in forced mode:
- if force:
+ # Expected keyboard input in semi-automatic mode and forced mode:
+ if not manual:
self.ExpectReadline([])
- options = MakeOptions(f=force, m=manual,
- r="reviewer@chromium.org" if not manual else None,
- c = TEST_CONFIG[CHROMIUM])
- RunPushToTrunk(TEST_CONFIG, PushToTrunkOptions(options), self)
-
- deps = FileToText(TEST_CONFIG[DEPS_FILE])
- self.assertTrue(re.search("\"v8_revision\": \"123456\"", deps))
+ args = ["-a", "author@chromium.org", "--revision", "123455"]
+ if force: args.append("-f")
+ if manual: args.append("-m")
+ else: args += ["-r", "reviewer@chromium.org"]
+ PushToTrunk(TEST_CONFIG, self).Run(args)
cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
@@ -754,90 +768,278 @@ Performance and stability improvements on all platforms.""", commit)
def testPushToTrunkForced(self):
self._PushToTrunk(force=True)
+
+ def _ChromiumRoll(self, force=False, manual=False):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ if not os.path.exists(TEST_CONFIG[CHROMIUM]):
+ os.makedirs(TEST_CONFIG[CHROMIUM])
+ TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
+ TEST_CONFIG[DEPS_FILE])
+
+ os.environ["EDITOR"] = "vi"
+ force_flag = " -f" if not manual else ""
+ self.ExpectGit([
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
+ Git(("log -1 --format=%H --grep="
+ "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
+ "svn/trunk"), "push_hash\n"),
+ Git("svn find-rev push_hash", "123455\n"),
+ Git("log -1 --format=%s push_hash",
+ "Version 3.22.5 (based on bleeding_edge revision r123454)\n"),
+ Git("status -s -uno", ""),
+ Git("checkout -f master", ""),
+ Git("pull", ""),
+ Git("checkout -b v8-roll-123455", ""),
+ Git(("commit -am \"Update V8 to version 3.22.5 "
+ "(based on bleeding_edge revision r123454).\n\n"
+ "TBR=reviewer@chromium.org\""),
+ ""),
+ Git(("cl upload --send-mail --email \"author@chromium.org\"%s"
+ % force_flag), ""),
+ ])
+
+ # Expected keyboard input in manual mode:
+ if manual:
+ self.ExpectReadline([
+ RL("reviewer@chromium.org"), # Chromium reviewer.
+ ])
+
+ # Expected keyboard input in semi-automatic mode and forced mode:
+ if not manual:
+ self.ExpectReadline([])
+
+ args = ["-a", "author@chromium.org", "-c", TEST_CONFIG[CHROMIUM]]
+ if force: args.append("-f")
+ if manual: args.append("-m")
+ else: args += ["-r", "reviewer@chromium.org"]
+ ChromiumRoll(TEST_CONFIG, self).Run(args)
+
+ deps = FileToText(TEST_CONFIG[DEPS_FILE])
+ self.assertTrue(re.search("\"v8_revision\": \"123455\"", deps))
+
+ def testChromiumRollManual(self):
+ self._ChromiumRoll(manual=True)
+
+ def testChromiumRollSemiAutomatic(self):
+ self._ChromiumRoll()
+
+ def testChromiumRollForced(self):
+ self._ChromiumRoll(force=True)
+
def testCheckLastPushRecently(self):
self.ExpectGit([
- ["svn log -1 --oneline", "r101 | Text"],
- ["svn log -1 --oneline ChangeLog", "r99 | Prepare push to trunk..."],
+ Git(("log -1 --format=%H --grep="
+ "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
+ "svn/trunk"), "hash2\n"),
+ Git("log -1 --format=%s hash2",
+ "Version 3.4.5 (based on bleeding_edge revision r99)\n"),
])
- state = {}
- self.MakeStep(FetchLatestRevision, state=state).Run()
- self.assertRaises(Exception, self.MakeStep(CheckLastPush, state=state).Run)
+ self._state["lkgr"] = "101"
+
+ self.assertRaises(Exception, lambda: self.RunStep(auto_push.AutoPush,
+ CheckLastPush,
+ AUTO_PUSH_ARGS))
- def testAutoRoll(self):
- status_password = self.MakeEmptyTempFile()
- TextToFile("PW", status_password)
+ def testAutoPush(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
self.ExpectReadURL([
- ["https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled\"}"],
- ["https://v8-status.appspot.com/lkgr", Exception("Network problem")],
- ["https://v8-status.appspot.com/lkgr", "100"],
- ["https://v8-status.appspot.com/status",
- ("username=v8-auto-roll%40chromium.org&"
- "message=Tree+is+closed+%28preparing+to+push%29&password=PW"),
- ""],
- ["https://v8-status.appspot.com/status",
- ("username=v8-auto-roll%40chromium.org&"
- "message=Tree+is+throttled&password=PW"), ""],
+ URL("https://v8-status.appspot.com/current?format=json",
+ "{\"message\": \"Tree is throttled\"}"),
+ URL("https://v8-status.appspot.com/lkgr", Exception("Network problem")),
+ URL("https://v8-status.appspot.com/lkgr", "100"),
])
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch\n"],
- ["svn fetch", ""],
- ["svn log -1 --oneline", "r100 | Text"],
- ["svn log -1 --oneline ChangeLog", "r65 | Prepare push to trunk..."],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
+ Git(("log -1 --format=%H --grep=\""
+ "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
+ " svn/trunk"), "push_hash\n"),
+ Git("log -1 --format=%s push_hash",
+ "Version 3.4.5 (based on bleeding_edge revision r79)\n"),
])
- auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(
- MakeOptions(status_password=status_password)), self)
+ auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
+
+ state = json.loads(FileToText("%s-state.json"
+ % TEST_CONFIG[PERSISTFILE_BASENAME]))
- self.assertEquals("100", self.MakeStep().Restore("lkgr"))
- self.assertEquals("100", self.MakeStep().Restore("latest"))
+ self.assertEquals("100", state["lkgr"])
- def testAutoRollStoppedBySettings(self):
+ def testAutoPushStoppedBySettings(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
- TextToFile("{\"enable_auto_roll\": false}", TEST_CONFIG[SETTINGS_LOCATION])
+ TextToFile("{\"enable_auto_push\": false}", TEST_CONFIG[SETTINGS_LOCATION])
self.ExpectReadURL([])
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch\n"],
- ["svn fetch", ""],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
])
- def RunAutoRoll():
- auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(MakeOptions()), self)
- self.assertRaises(Exception, RunAutoRoll)
+ def RunAutoPush():
+ auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
+ self.assertRaises(Exception, RunAutoPush)
- def testAutoRollStoppedByTreeStatus(self):
+ def testAutoPushStoppedByTreeStatus(self):
TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
self.ExpectReadURL([
- ["https://v8-status.appspot.com/current?format=json",
- "{\"message\": \"Tree is throttled (no push)\"}"],
+ URL("https://v8-status.appspot.com/current?format=json",
+ "{\"message\": \"Tree is throttled (no push)\"}"),
+ ])
+
+ self.ExpectGit([
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
])
+ def RunAutoPush():
+ auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
+ self.assertRaises(Exception, RunAutoPush)
+
+ def testMergeToBranch(self):
+ TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile(build=5)
+ os.environ["EDITOR"] = "vi"
+ extra_patch = self.MakeEmptyTempFile()
+
+ def VerifyPatch(patch):
+ return lambda: self.assertEquals(patch,
+ FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
+
+ msg = """Merged r12345, r23456, r34567, r45678, r56789 into trunk branch.
+
+Title4
+
+Title2
+
+Title3
+
+Title1
+
+Title5
+
+BUG=123,234,345,456,567,v8:123
+LOG=N
+"""
+
+ def VerifySVNCommit():
+ commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ self.assertEquals(msg, commit)
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
self.ExpectGit([
- ["status -s -uno", ""],
- ["status -s -b -uno", "## some_branch\n"],
- ["svn fetch", ""],
+ Git("status -s -uno", ""),
+ Git("status -s -b -uno", "## some_branch\n"),
+ Git("svn fetch", ""),
+ Git("branch", " branch1\n* branch2\n"),
+ Git("checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("branch", " branch1\n* branch2\n"),
+ Git("checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""),
+ Git("log --format=%H --grep=\"Port r12345\" --reverse svn/bleeding_edge",
+ "hash1\nhash2"),
+ Git("svn find-rev hash1 svn/bleeding_edge", "45678"),
+ Git("log -1 --format=%s hash1", "Title1"),
+ Git("svn find-rev hash2 svn/bleeding_edge", "23456"),
+ Git("log -1 --format=%s hash2", "Title2"),
+ Git("log --format=%H --grep=\"Port r23456\" --reverse svn/bleeding_edge",
+ ""),
+ Git("log --format=%H --grep=\"Port r34567\" --reverse svn/bleeding_edge",
+ "hash3"),
+ Git("svn find-rev hash3 svn/bleeding_edge", "56789"),
+ Git("log -1 --format=%s hash3", "Title3"),
+ Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
+      # Simulate svn being down, which stops the script.
+ Git("svn find-rev r23456 svn/bleeding_edge", None),
+ # Restart script in the failing step.
+ Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
+ Git("svn find-rev r23456 svn/bleeding_edge", "hash2"),
+ Git("svn find-rev r34567 svn/bleeding_edge", "hash3"),
+ Git("svn find-rev r45678 svn/bleeding_edge", "hash1"),
+ Git("svn find-rev r56789 svn/bleeding_edge", "hash5"),
+ Git("log -1 --format=%s hash4", "Title4"),
+ Git("log -1 --format=%s hash2", "Title2"),
+ Git("log -1 --format=%s hash3", "Title3"),
+ Git("log -1 --format=%s hash1", "Title1"),
+ Git("log -1 --format=%s hash5", "Title5"),
+ Git("log -1 hash4", "Title4\nBUG=123\nBUG=234"),
+ Git("log -1 hash2", "Title2\n BUG = v8:123,345"),
+ Git("log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
+ Git("log -1 hash1", "Title1"),
+ Git("log -1 hash5", "Title5"),
+ Git("log -1 -p hash4", "patch4"),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", cb=VerifyPatch("patch4")),
+ Git("log -1 -p hash2", "patch2"),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", cb=VerifyPatch("patch2")),
+ Git("log -1 -p hash3", "patch3"),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", cb=VerifyPatch("patch3")),
+ Git("log -1 -p hash1", "patch1"),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", cb=VerifyPatch("patch1")),
+ Git("log -1 -p hash5", "patch5\n"),
+ Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+ "", cb=VerifyPatch("patch5\n")),
+ Git("apply --index --reject \"%s\"" % extra_patch, ""),
+ Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""),
+ Git("cl upload --send-mail -r \"reviewer@chromium.org\"", ""),
+ Git("checkout -f %s" % TEST_CONFIG[BRANCHNAME], ""),
+ Git("cl presubmit", "Presubmit successfull\n"),
+ Git("cl dcommit -f --bypass-hooks", "Closing issue\n", cb=VerifySVNCommit),
+ Git("svn fetch", ""),
+ Git("log -1 --format=%%H --grep=\"%s\" svn/trunk" % msg, "hash6"),
+ Git("svn find-rev hash6", "1324"),
+ Git(("copy -r 1324 https://v8.googlecode.com/svn/trunk "
+ "https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
+ "\"Tagging version 3.22.5.1\""), ""),
+ Git("checkout -f some_branch", ""),
+ Git("branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""),
+ Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
])
- def RunAutoRoll():
- auto_roll.RunAutoRoll(TEST_CONFIG, AutoRollOptions(MakeOptions()), self)
- self.assertRaises(Exception, RunAutoRoll)
+ self.ExpectReadline([
+ RL("Y"), # Automatically add corresponding ports (34567, 56789)?
+ RL("Y"), # Automatically increment patch level?
+ RL("reviewer@chromium.org"), # V8 reviewer.
+ RL("LGTM"), # Enter LGTM for V8 CL.
+ ])
+
+ # r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
+ # ports of r12345. r56789 is the MIPS port of r34567.
+ args = ["-f", "-p", extra_patch, "--branch", "trunk", "12345", "23456",
+ "34567"]
+
+    # The first run of the script stops because svn is down.
+ self.assertRaises(GitFailedException,
+ lambda: MergeToBranch(TEST_CONFIG, self).Run(args))
+
+ # Test that state recovery after restarting the script works.
+ args += ["-s", "3"]
+ MergeToBranch(TEST_CONFIG, self).Run(args)
+
class SystemTest(unittest.TestCase):
def testReload(self):
step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
- options=CommonOptions(MakeOptions()),
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
body = step.Reload(
"""------------------------------------------------------------------------
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 489f447bf..b809fdf98 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -55,11 +55,11 @@ TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
MODE_FLAGS = {
- "debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
+ "debug" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap",
"--noconcurrent-recompilation"],
- "release" : ["--nobreak-on-abort", "--nodead-code-elimination",
+ "release" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--noconcurrent-recompilation"]}
SUPPORTED_ARCHS = ["android_arm",
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 52f5c7f32..cc1d480b4 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -63,10 +63,10 @@ VARIANT_FLAGS = {
VARIANTS = ["default", "stress", "nocrankshaft"]
MODE_FLAGS = {
- "debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
+ "debug" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap"],
- "release" : ["--nobreak-on-abort", "--nodead-code-elimination",
+ "release" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants"]}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
@@ -75,20 +75,24 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation"]
SUPPORTED_ARCHS = ["android_arm",
+ "android_arm64",
"android_ia32",
"arm",
"ia32",
"mipsel",
"nacl_ia32",
"nacl_x64",
- "x64"]
+ "x64",
+ "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
+ "android_arm64",
"android_ia32",
"arm",
"mipsel",
"nacl_ia32",
- "nacl_x64"]
+ "nacl_x64",
+ "arm64"]
def BuildOptions():
@@ -172,6 +176,10 @@ def BuildOptions():
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
+ result.add_option("--dont-skip-slow-simulator-tests",
+ help="Don't skip more slow tests when using a simulator.",
+ default=False, action="store_true",
+ dest="dont_skip_simulator_slow_tests")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
@@ -387,6 +395,9 @@ def Execute(arch, mode, args, options, suites, workspace):
options.extra_flags,
options.no_i18n)
+ # TODO(all): Combine "simulator" and "simulator_run".
+ simulator_run = not options.dont_skip_simulator_slow_tests and \
+ arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
@@ -396,6 +407,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"isolates": options.isolates,
"mode": mode,
"no_i18n": options.no_i18n,
+ "simulator_run": simulator_run,
"simulator": utils.UseSimulator(arch),
"system": utils.GuessOS(),
}
diff --git a/deps/v8/tools/shell-utils.h b/deps/v8/tools/shell-utils.h
new file mode 100644
index 000000000..ac61fb6ba
--- /dev/null
+++ b/deps/v8/tools/shell-utils.h
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions used by parser-shell and lexer-shell.
+
+#include <stdio.h>
+
+namespace v8 {
+namespace internal {
+
+enum Encoding {
+ LATIN1,
+ UTF8,
+ UTF16
+};
+
+const byte* ReadFileAndRepeat(const char* name, int* size, int repeat) {
+ FILE* file = fopen(name, "rb");
+ *size = 0;
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int file_size = ftell(file);
+ rewind(file);
+
+ *size = file_size * repeat;
+
+ byte* chars = new byte[*size + 1];
+ for (int i = 0; i < file_size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, file_size - i, file));
+ i += read;
+ }
+ fclose(file);
+
+ for (int i = file_size; i < *size; i++) {
+ chars[i] = chars[i - file_size];
+ }
+ chars[*size] = 0;
+
+ return chars;
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index e290122fb..826b576f2 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -52,9 +52,9 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows",
- "linux"]:
+for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32",
+ "arm", "arm64", "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64",
+ "macos", "windows", "linux"]:
VARIABLES[var] = var
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 508709f87..a5252b06a 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -81,7 +81,7 @@ def GuessOS():
def UseSimulator(arch):
machine = platform.machine()
return (machine and
- (arch == "mipsel" or arch == "arm") and
+ (arch == "mipsel" or arch == "arm" or arch == "arm64") and
not arch.startswith(machine))
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 2c3411654..c7f1ddc16 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -62,63 +62,72 @@ INSTANCE_TYPES = {
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "FREE_SPACE_TYPE",
- 138: "EXTERNAL_BYTE_ARRAY_TYPE",
- 139: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
- 140: "EXTERNAL_SHORT_ARRAY_TYPE",
- 141: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
- 142: "EXTERNAL_INT_ARRAY_TYPE",
- 143: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
- 144: "EXTERNAL_FLOAT_ARRAY_TYPE",
- 145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
- 146: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 149: "FILLER_TYPE",
- 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 151: "DECLARED_ACCESSOR_INFO_TYPE",
- 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 153: "ACCESSOR_PAIR_TYPE",
- 154: "ACCESS_CHECK_INFO_TYPE",
- 155: "INTERCEPTOR_INFO_TYPE",
- 156: "CALL_HANDLER_INFO_TYPE",
- 157: "FUNCTION_TEMPLATE_INFO_TYPE",
- 158: "OBJECT_TEMPLATE_INFO_TYPE",
- 159: "SIGNATURE_INFO_TYPE",
- 160: "TYPE_SWITCH_INFO_TYPE",
- 162: "ALLOCATION_MEMENTO_TYPE",
- 161: "ALLOCATION_SITE_TYPE",
- 163: "SCRIPT_TYPE",
- 164: "CODE_CACHE_TYPE",
- 165: "POLYMORPHIC_CODE_CACHE_TYPE",
- 166: "TYPE_FEEDBACK_INFO_TYPE",
- 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 168: "BOX_TYPE",
- 171: "FIXED_ARRAY_TYPE",
- 147: "FIXED_DOUBLE_ARRAY_TYPE",
- 148: "CONSTANT_POOL_ARRAY_TYPE",
- 172: "SHARED_FUNCTION_INFO_TYPE",
- 173: "JS_MESSAGE_OBJECT_TYPE",
- 176: "JS_VALUE_TYPE",
- 177: "JS_DATE_TYPE",
- 178: "JS_OBJECT_TYPE",
- 179: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 180: "JS_GENERATOR_OBJECT_TYPE",
- 181: "JS_MODULE_TYPE",
- 182: "JS_GLOBAL_OBJECT_TYPE",
- 183: "JS_BUILTINS_OBJECT_TYPE",
- 184: "JS_GLOBAL_PROXY_TYPE",
- 185: "JS_ARRAY_TYPE",
- 186: "JS_ARRAY_BUFFER_TYPE",
- 187: "JS_TYPED_ARRAY_TYPE",
- 188: "JS_DATA_VIEW_TYPE",
- 175: "JS_PROXY_TYPE",
- 189: "JS_SET_TYPE",
- 190: "JS_MAP_TYPE",
- 191: "JS_WEAK_MAP_TYPE",
- 192: "JS_WEAK_SET_TYPE",
- 193: "JS_REGEXP_TYPE",
- 194: "JS_FUNCTION_TYPE",
- 174: "JS_FUNCTION_PROXY_TYPE",
- 169: "DEBUG_INFO_TYPE",
- 170: "BREAK_POINT_INFO_TYPE",
+ 138: "EXTERNAL_INT8_ARRAY_TYPE",
+ 139: "EXTERNAL_UINT8_ARRAY_TYPE",
+ 140: "EXTERNAL_INT16_ARRAY_TYPE",
+ 141: "EXTERNAL_UINT16_ARRAY_TYPE",
+ 142: "EXTERNAL_INT32_ARRAY_TYPE",
+ 143: "EXTERNAL_UINT32_ARRAY_TYPE",
+ 144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
+ 145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
+ 146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
+ 147: "FIXED_INT8_ARRAY_TYPE",
+ 148: "FIXED_UINT8_ARRAY_TYPE",
+ 149: "FIXED_INT16_ARRAY_TYPE",
+ 150: "FIXED_UINT16_ARRAY_TYPE",
+ 151: "FIXED_INT32_ARRAY_TYPE",
+ 152: "FIXED_UINT32_ARRAY_TYPE",
+ 153: "FIXED_FLOAT32_ARRAY_TYPE",
+ 154: "FIXED_FLOAT64_ARRAY_TYPE",
+ 155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 157: "FILLER_TYPE",
+ 158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 159: "DECLARED_ACCESSOR_INFO_TYPE",
+ 160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 161: "ACCESSOR_PAIR_TYPE",
+ 162: "ACCESS_CHECK_INFO_TYPE",
+ 163: "INTERCEPTOR_INFO_TYPE",
+ 164: "CALL_HANDLER_INFO_TYPE",
+ 165: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 166: "OBJECT_TEMPLATE_INFO_TYPE",
+ 167: "SIGNATURE_INFO_TYPE",
+ 168: "TYPE_SWITCH_INFO_TYPE",
+ 170: "ALLOCATION_MEMENTO_TYPE",
+ 169: "ALLOCATION_SITE_TYPE",
+ 171: "SCRIPT_TYPE",
+ 172: "CODE_CACHE_TYPE",
+ 173: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 174: "TYPE_FEEDBACK_INFO_TYPE",
+ 175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 176: "BOX_TYPE",
+ 179: "FIXED_ARRAY_TYPE",
+ 156: "FIXED_DOUBLE_ARRAY_TYPE",
+ 180: "CONSTANT_POOL_ARRAY_TYPE",
+ 181: "SHARED_FUNCTION_INFO_TYPE",
+ 182: "JS_MESSAGE_OBJECT_TYPE",
+ 185: "JS_VALUE_TYPE",
+ 186: "JS_DATE_TYPE",
+ 187: "JS_OBJECT_TYPE",
+ 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 189: "JS_GENERATOR_OBJECT_TYPE",
+ 190: "JS_MODULE_TYPE",
+ 191: "JS_GLOBAL_OBJECT_TYPE",
+ 192: "JS_BUILTINS_OBJECT_TYPE",
+ 193: "JS_GLOBAL_PROXY_TYPE",
+ 194: "JS_ARRAY_TYPE",
+ 195: "JS_ARRAY_BUFFER_TYPE",
+ 196: "JS_TYPED_ARRAY_TYPE",
+ 197: "JS_DATA_VIEW_TYPE",
+ 184: "JS_PROXY_TYPE",
+ 198: "JS_SET_TYPE",
+ 199: "JS_MAP_TYPE",
+ 200: "JS_WEAK_MAP_TYPE",
+ 201: "JS_WEAK_SET_TYPE",
+ 202: "JS_REGEXP_TYPE",
+ 203: "JS_FUNCTION_TYPE",
+ 183: "JS_FUNCTION_PROXY_TYPE",
+ 177: "DEBUG_INFO_TYPE",
+ 178: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -127,21 +136,21 @@ KNOWN_MAPS = {
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "AsciiInternalizedStringMap"),
- 0x08121: (171, "FixedArrayMap"),
+ 0x08121: (179, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
- 0x08199: (149, "OnePointerFillerMap"),
- 0x081c1: (149, "TwoPointerFillerMap"),
+ 0x08199: (157, "OnePointerFillerMap"),
+ 0x081c1: (157, "TwoPointerFillerMap"),
0x081e9: (132, "CellMap"),
0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (172, "SharedFunctionInfoMap"),
- 0x08261: (171, "NativeContextMap"),
+ 0x08239: (181, "SharedFunctionInfoMap"),
+ 0x08261: (179, "NativeContextMap"),
0x08289: (130, "CodeMap"),
- 0x082b1: (171, "ScopeInfoMap"),
- 0x082d9: (171, "FixedCOWArrayMap"),
- 0x08301: (147, "FixedDoubleArrayMap"),
- 0x08329: (148, "ConstantPoolArrayMap"),
- 0x08351: (171, "HashTableMap"),
+ 0x082b1: (179, "ScopeInfoMap"),
+ 0x082d9: (179, "FixedCOWArrayMap"),
+ 0x08301: (156, "FixedDoubleArrayMap"),
+ 0x08329: (180, "ConstantPoolArrayMap"),
+ 0x08351: (179, "HashTableMap"),
0x08379: (128, "SymbolMap"),
0x083a1: (64, "StringMap"),
0x083c9: (68, "AsciiStringMap"),
@@ -166,47 +175,56 @@ KNOWN_MAPS = {
0x086c1: (86, "ShortExternalAsciiStringMap"),
0x086e9: (64, "UndetectableStringMap"),
0x08711: (68, "UndetectableAsciiStringMap"),
- 0x08739: (138, "ExternalByteArrayMap"),
- 0x08761: (139, "ExternalUnsignedByteArrayMap"),
- 0x08789: (140, "ExternalShortArrayMap"),
- 0x087b1: (141, "ExternalUnsignedShortArrayMap"),
- 0x087d9: (142, "ExternalIntArrayMap"),
- 0x08801: (143, "ExternalUnsignedIntArrayMap"),
- 0x08829: (144, "ExternalFloatArrayMap"),
- 0x08851: (145, "ExternalDoubleArrayMap"),
- 0x08879: (146, "ExternalPixelArrayMap"),
- 0x088a1: (171, "NonStrictArgumentsElementsMap"),
- 0x088c9: (171, "FunctionContextMap"),
- 0x088f1: (171, "CatchContextMap"),
- 0x08919: (171, "WithContextMap"),
- 0x08941: (171, "BlockContextMap"),
- 0x08969: (171, "ModuleContextMap"),
- 0x08991: (171, "GlobalContextMap"),
- 0x089b9: (173, "JSMessageObjectMap"),
- 0x089e1: (135, "ForeignMap"),
- 0x08a09: (178, "NeanderMap"),
- 0x08a31: (162, "AllocationMementoMap"),
- 0x08a59: (161, "AllocationSiteMap"),
- 0x08a81: (165, "PolymorphicCodeCacheMap"),
- 0x08aa9: (163, "ScriptMap"),
- 0x08af9: (178, "ExternalMap"),
- 0x08b21: (168, "BoxMap"),
- 0x08b49: (150, "DeclaredAccessorDescriptorMap"),
- 0x08b71: (151, "DeclaredAccessorInfoMap"),
- 0x08b99: (152, "ExecutableAccessorInfoMap"),
- 0x08bc1: (153, "AccessorPairMap"),
- 0x08be9: (154, "AccessCheckInfoMap"),
- 0x08c11: (155, "InterceptorInfoMap"),
- 0x08c39: (156, "CallHandlerInfoMap"),
- 0x08c61: (157, "FunctionTemplateInfoMap"),
- 0x08c89: (158, "ObjectTemplateInfoMap"),
- 0x08cb1: (159, "SignatureInfoMap"),
- 0x08cd9: (160, "TypeSwitchInfoMap"),
- 0x08d01: (164, "CodeCacheMap"),
- 0x08d29: (166, "TypeFeedbackInfoMap"),
- 0x08d51: (167, "AliasedArgumentsEntryMap"),
- 0x08d79: (169, "DebugInfoMap"),
- 0x08da1: (170, "BreakPointInfoMap"),
+ 0x08739: (138, "ExternalInt8ArrayMap"),
+ 0x08761: (139, "ExternalUint8ArrayMap"),
+ 0x08789: (140, "ExternalInt16ArrayMap"),
+ 0x087b1: (141, "ExternalUint16ArrayMap"),
+ 0x087d9: (142, "ExternalInt32ArrayMap"),
+ 0x08801: (143, "ExternalUint32ArrayMap"),
+ 0x08829: (144, "ExternalFloat32ArrayMap"),
+ 0x08851: (145, "ExternalFloat64ArrayMap"),
+ 0x08879: (146, "ExternalUint8ClampedArrayMap"),
+ 0x088a1: (148, "FixedUint8ArrayMap"),
+ 0x088c9: (147, "FixedInt8ArrayMap"),
+ 0x088f1: (150, "FixedUint16ArrayMap"),
+ 0x08919: (149, "FixedInt16ArrayMap"),
+ 0x08941: (152, "FixedUint32ArrayMap"),
+ 0x08969: (151, "FixedInt32ArrayMap"),
+ 0x08991: (153, "FixedFloat32ArrayMap"),
+ 0x089b9: (154, "FixedFloat64ArrayMap"),
+ 0x089e1: (155, "FixedUint8ClampedArrayMap"),
+ 0x08a09: (179, "NonStrictArgumentsElementsMap"),
+ 0x08a31: (179, "FunctionContextMap"),
+ 0x08a59: (179, "CatchContextMap"),
+ 0x08a81: (179, "WithContextMap"),
+ 0x08aa9: (179, "BlockContextMap"),
+ 0x08ad1: (179, "ModuleContextMap"),
+ 0x08af9: (179, "GlobalContextMap"),
+ 0x08b21: (182, "JSMessageObjectMap"),
+ 0x08b49: (135, "ForeignMap"),
+ 0x08b71: (187, "NeanderMap"),
+ 0x08b99: (170, "AllocationMementoMap"),
+ 0x08bc1: (169, "AllocationSiteMap"),
+ 0x08be9: (173, "PolymorphicCodeCacheMap"),
+ 0x08c11: (171, "ScriptMap"),
+ 0x08c61: (187, "ExternalMap"),
+ 0x08cb1: (176, "BoxMap"),
+ 0x08cd9: (158, "DeclaredAccessorDescriptorMap"),
+ 0x08d01: (159, "DeclaredAccessorInfoMap"),
+ 0x08d29: (160, "ExecutableAccessorInfoMap"),
+ 0x08d51: (161, "AccessorPairMap"),
+ 0x08d79: (162, "AccessCheckInfoMap"),
+ 0x08da1: (163, "InterceptorInfoMap"),
+ 0x08dc9: (164, "CallHandlerInfoMap"),
+ 0x08df1: (165, "FunctionTemplateInfoMap"),
+ 0x08e19: (166, "ObjectTemplateInfoMap"),
+ 0x08e41: (167, "SignatureInfoMap"),
+ 0x08e69: (168, "TypeSwitchInfoMap"),
+ 0x08e91: (172, "CodeCacheMap"),
+ 0x08eb9: (174, "TypeFeedbackInfoMap"),
+ 0x08ee1: (175, "AliasedArgumentsEntryMap"),
+ 0x08f09: (177, "DebugInfoMap"),
+ 0x08f31: (178, "BreakPointInfoMap"),
}
# List of known V8 objects.
@@ -226,32 +244,38 @@ KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x09521): "TerminationException",
("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x10485): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x10a99): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x10aa1): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x10aed): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x10b25): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x13b41): "ObservationState",
- ("OLD_POINTER_SPACE", 0x13b4d): "FrozenSymbol",
- ("OLD_POINTER_SPACE", 0x13b5d): "ElementsTransitionSymbol",
- ("OLD_POINTER_SPACE", 0x13b6d): "EmptySlowElementDictionary",
- ("OLD_POINTER_SPACE", 0x13d09): "ObservedSymbol",
- ("OLD_POINTER_SPACE", 0x32325): "StringTable",
+ ("OLD_POINTER_SPACE", 0x0ca65): "MegamorphicSymbol",
+ ("OLD_POINTER_SPACE", 0x0ca75): "UninitializedSymbol",
+ ("OLD_POINTER_SPACE", 0x10ae9): "NonMonomorphicCache",
+ ("OLD_POINTER_SPACE", 0x110fd): "PolymorphicCodeCache",
+ ("OLD_POINTER_SPACE", 0x11105): "NativesSourceCache",
+ ("OLD_POINTER_SPACE", 0x11155): "EmptyScript",
+ ("OLD_POINTER_SPACE", 0x11189): "IntrinsicFunctionNames",
+ ("OLD_POINTER_SPACE", 0x141a5): "ObservationState",
+ ("OLD_POINTER_SPACE", 0x141b1): "FrozenSymbol",
+ ("OLD_POINTER_SPACE", 0x141c1): "NonExistentSymbol",
+ ("OLD_POINTER_SPACE", 0x141d1): "ElementsTransitionSymbol",
+ ("OLD_POINTER_SPACE", 0x141e1): "EmptySlowElementDictionary",
+ ("OLD_POINTER_SPACE", 0x1437d): "ObservedSymbol",
+ ("OLD_POINTER_SPACE", 0x1438d): "AllocationSitesScratchpad",
+ ("OLD_POINTER_SPACE", 0x14795): "MicrotaskState",
+ ("OLD_POINTER_SPACE", 0x36241): "StringTable",
("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
("OLD_DATA_SPACE", 0x080a9): "NanValue",
("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x08279): "EmptyExternalByteArray",
- ("OLD_DATA_SPACE", 0x08285): "EmptyExternalUnsignedByteArray",
- ("OLD_DATA_SPACE", 0x08291): "EmptyExternalShortArray",
- ("OLD_DATA_SPACE", 0x0829d): "EmptyExternalUnsignedShortArray",
- ("OLD_DATA_SPACE", 0x082a9): "EmptyExternalIntArray",
- ("OLD_DATA_SPACE", 0x082b5): "EmptyExternalUnsignedIntArray",
- ("OLD_DATA_SPACE", 0x082c1): "EmptyExternalFloatArray",
- ("OLD_DATA_SPACE", 0x082cd): "EmptyExternalDoubleArray",
- ("OLD_DATA_SPACE", 0x082d9): "EmptyExternalPixelArray",
- ("OLD_DATA_SPACE", 0x082e5): "InfinityValue",
- ("OLD_DATA_SPACE", 0x082f1): "MinusZeroValue",
- ("CODE_SPACE", 0x14181): "JsConstructEntryCode",
- ("CODE_SPACE", 0x15c61): "JsEntryCode",
+ ("OLD_DATA_SPACE", 0x08149): "EmptyConstantPoolArray",
+ ("OLD_DATA_SPACE", 0x0828d): "EmptyExternalInt8Array",
+ ("OLD_DATA_SPACE", 0x08299): "EmptyExternalUint8Array",
+ ("OLD_DATA_SPACE", 0x082a5): "EmptyExternalInt16Array",
+ ("OLD_DATA_SPACE", 0x082b1): "EmptyExternalUint16Array",
+ ("OLD_DATA_SPACE", 0x082bd): "EmptyExternalInt32Array",
+ ("OLD_DATA_SPACE", 0x082c9): "EmptyExternalUint32Array",
+ ("OLD_DATA_SPACE", 0x082d5): "EmptyExternalFloat32Array",
+ ("OLD_DATA_SPACE", 0x082e1): "EmptyExternalFloat64Array",
+ ("OLD_DATA_SPACE", 0x082ed): "EmptyExternalUint8ClampedArray",
+ ("OLD_DATA_SPACE", 0x082f9): "InfinityValue",
+ ("OLD_DATA_SPACE", 0x08305): "MinusZeroValue",
+ ("CODE_SPACE", 0x138e1): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x21361): "JsEntryCode",
}